1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
4 *
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code parts that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_max_pkts: This parameter defines the maximum number of packets that
42 * can be aggregated into a single large packet
43 * napi: This parameter is used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
45 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
46 * Possible values '1' for enable and '0' for disable. Default is '0'
47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
48 * Possible values '1' for enable , '0' for disable.
49 * Default is '2' - which means disable in promisc mode
50 * and enable in non-promiscuous mode.
51 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
52 * Possible values '1' for enable and '0' for disable. Default is '0'
53 ************************************************************************/
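/*
 * For illustration, a module load that selects MSI-X interrupts, NAPI
 * and two Tx FIFOs might look like this (the parameter values here are
 * hypothetical, chosen only to show the syntax):
 *
 *   modprobe s2io intr_type=2 napi=1 tx_fifo_num=2 tx_fifo_len=512,512
 */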
54
55 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/mdio.h>
67 #include <linux/skbuff.h>
68 #include <linux/init.h>
69 #include <linux/delay.h>
70 #include <linux/stddef.h>
71 #include <linux/ioctl.h>
72 #include <linux/timex.h>
73 #include <linux/ethtool.h>
74 #include <linux/workqueue.h>
75 #include <linux/if_vlan.h>
76 #include <linux/ip.h>
77 #include <linux/tcp.h>
78 #include <linux/uaccess.h>
79 #include <linux/io.h>
80 #include <linux/slab.h>
81 #include <net/tcp.h>
82
83 #include <asm/system.h>
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90
91 #define DRV_VERSION "2.0.26.28"
92
93 /* S2io Driver name & version. */
94 static const char s2io_driver_name[] = "Neterion";
95 static const char s2io_driver_version[] = DRV_VERSION;
96
97 static const int rxd_size[2] = {32, 48};
98 static const int rxd_count[2] = {127, 85};
99
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
106
107 return ret;
108 }
109
110 /*
111 * Cards with the following subsystem_ids have a link state indication
112 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
113 * The macro below identifies these cards given the subsystem_id.
114 */
115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116 (dev_type == XFRAME_I_DEVICE) ? \
117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
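/*
 * Illustrative evaluation of the macro above (values hypothetical):
 * an XFRAME_I_DEVICE with subid 0x600C evaluates to 1, flagging the
 * faulty link indicator; any subid on an XFRAME_II_DEVICE gives 0.
 */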
119
120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
122
123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
124 {
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 }
127
128 /* Ethtool related variables and Macros. */
129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
135 };
136
137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
138 {"tmac_frms"},
139 {"tmac_data_octets"},
140 {"tmac_drop_frms"},
141 {"tmac_mcst_frms"},
142 {"tmac_bcst_frms"},
143 {"tmac_pause_ctrl_frms"},
144 {"tmac_ttl_octets"},
145 {"tmac_ucst_frms"},
146 {"tmac_nucst_frms"},
147 {"tmac_any_err_frms"},
148 {"tmac_ttl_less_fb_octets"},
149 {"tmac_vld_ip_octets"},
150 {"tmac_vld_ip"},
151 {"tmac_drop_ip"},
152 {"tmac_icmp"},
153 {"tmac_rst_tcp"},
154 {"tmac_tcp"},
155 {"tmac_udp"},
156 {"rmac_vld_frms"},
157 {"rmac_data_octets"},
158 {"rmac_fcs_err_frms"},
159 {"rmac_drop_frms"},
160 {"rmac_vld_mcst_frms"},
161 {"rmac_vld_bcst_frms"},
162 {"rmac_in_rng_len_err_frms"},
163 {"rmac_out_rng_len_err_frms"},
164 {"rmac_long_frms"},
165 {"rmac_pause_ctrl_frms"},
166 {"rmac_unsup_ctrl_frms"},
167 {"rmac_ttl_octets"},
168 {"rmac_accepted_ucst_frms"},
169 {"rmac_accepted_nucst_frms"},
170 {"rmac_discarded_frms"},
171 {"rmac_drop_events"},
172 {"rmac_ttl_less_fb_octets"},
173 {"rmac_ttl_frms"},
174 {"rmac_usized_frms"},
175 {"rmac_osized_frms"},
176 {"rmac_frag_frms"},
177 {"rmac_jabber_frms"},
178 {"rmac_ttl_64_frms"},
179 {"rmac_ttl_65_127_frms"},
180 {"rmac_ttl_128_255_frms"},
181 {"rmac_ttl_256_511_frms"},
182 {"rmac_ttl_512_1023_frms"},
183 {"rmac_ttl_1024_1518_frms"},
184 {"rmac_ip"},
185 {"rmac_ip_octets"},
186 {"rmac_hdr_err_ip"},
187 {"rmac_drop_ip"},
188 {"rmac_icmp"},
189 {"rmac_tcp"},
190 {"rmac_udp"},
191 {"rmac_err_drp_udp"},
192 {"rmac_xgmii_err_sym"},
193 {"rmac_frms_q0"},
194 {"rmac_frms_q1"},
195 {"rmac_frms_q2"},
196 {"rmac_frms_q3"},
197 {"rmac_frms_q4"},
198 {"rmac_frms_q5"},
199 {"rmac_frms_q6"},
200 {"rmac_frms_q7"},
201 {"rmac_full_q0"},
202 {"rmac_full_q1"},
203 {"rmac_full_q2"},
204 {"rmac_full_q3"},
205 {"rmac_full_q4"},
206 {"rmac_full_q5"},
207 {"rmac_full_q6"},
208 {"rmac_full_q7"},
209 {"rmac_pause_cnt"},
210 {"rmac_xgmii_data_err_cnt"},
211 {"rmac_xgmii_ctrl_err_cnt"},
212 {"rmac_accepted_ip"},
213 {"rmac_err_tcp"},
214 {"rd_req_cnt"},
215 {"new_rd_req_cnt"},
216 {"new_rd_req_rtry_cnt"},
217 {"rd_rtry_cnt"},
218 {"wr_rtry_rd_ack_cnt"},
219 {"wr_req_cnt"},
220 {"new_wr_req_cnt"},
221 {"new_wr_req_rtry_cnt"},
222 {"wr_rtry_cnt"},
223 {"wr_disc_cnt"},
224 {"rd_rtry_wr_ack_cnt"},
225 {"txp_wr_cnt"},
226 {"txd_rd_cnt"},
227 {"txd_wr_cnt"},
228 {"rxd_rd_cnt"},
229 {"rxd_wr_cnt"},
230 {"txf_rd_cnt"},
231 {"rxf_wr_cnt"}
232 };
233
234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
235 {"rmac_ttl_1519_4095_frms"},
236 {"rmac_ttl_4096_8191_frms"},
237 {"rmac_ttl_8192_max_frms"},
238 {"rmac_ttl_gt_max_frms"},
239 {"rmac_osized_alt_frms"},
240 {"rmac_jabber_alt_frms"},
241 {"rmac_gt_max_alt_frms"},
242 {"rmac_vlan_frms"},
243 {"rmac_len_discard"},
244 {"rmac_fcs_discard"},
245 {"rmac_pf_discard"},
246 {"rmac_da_discard"},
247 {"rmac_red_discard"},
248 {"rmac_rts_discard"},
249 {"rmac_ingm_full_discard"},
250 {"link_fault_cnt"}
251 };
252
253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
254 {"\n DRIVER STATISTICS"},
255 {"single_bit_ecc_errs"},
256 {"double_bit_ecc_errs"},
257 {"parity_err_cnt"},
258 {"serious_err_cnt"},
259 {"soft_reset_cnt"},
260 {"fifo_full_cnt"},
261 {"ring_0_full_cnt"},
262 {"ring_1_full_cnt"},
263 {"ring_2_full_cnt"},
264 {"ring_3_full_cnt"},
265 {"ring_4_full_cnt"},
266 {"ring_5_full_cnt"},
267 {"ring_6_full_cnt"},
268 {"ring_7_full_cnt"},
269 {"alarm_transceiver_temp_high"},
270 {"alarm_transceiver_temp_low"},
271 {"alarm_laser_bias_current_high"},
272 {"alarm_laser_bias_current_low"},
273 {"alarm_laser_output_power_high"},
274 {"alarm_laser_output_power_low"},
275 {"warn_transceiver_temp_high"},
276 {"warn_transceiver_temp_low"},
277 {"warn_laser_bias_current_high"},
278 {"warn_laser_bias_current_low"},
279 {"warn_laser_output_power_high"},
280 {"warn_laser_output_power_low"},
281 {"lro_aggregated_pkts"},
282 {"lro_flush_both_count"},
283 {"lro_out_of_sequence_pkts"},
284 {"lro_flush_due_to_max_pkts"},
285 {"lro_avg_aggr_pkts"},
286 {"mem_alloc_fail_cnt"},
287 {"pci_map_fail_cnt"},
288 {"watchdog_timer_cnt"},
289 {"mem_allocated"},
290 {"mem_freed"},
291 {"link_up_cnt"},
292 {"link_down_cnt"},
293 {"link_up_time"},
294 {"link_down_time"},
295 {"tx_tcode_buf_abort_cnt"},
296 {"tx_tcode_desc_abort_cnt"},
297 {"tx_tcode_parity_err_cnt"},
298 {"tx_tcode_link_loss_cnt"},
299 {"tx_tcode_list_proc_err_cnt"},
300 {"rx_tcode_parity_err_cnt"},
301 {"rx_tcode_abort_cnt"},
302 {"rx_tcode_parity_abort_cnt"},
303 {"rx_tcode_rda_fail_cnt"},
304 {"rx_tcode_unkn_prot_cnt"},
305 {"rx_tcode_fcs_err_cnt"},
306 {"rx_tcode_buf_size_err_cnt"},
307 {"rx_tcode_rxd_corrupt_cnt"},
308 {"rx_tcode_unkn_err_cnt"},
309 {"tda_err_cnt"},
310 {"pfc_err_cnt"},
311 {"pcc_err_cnt"},
312 {"tti_err_cnt"},
313 {"tpa_err_cnt"},
314 {"sm_err_cnt"},
315 {"lso_err_cnt"},
316 {"mac_tmac_err_cnt"},
317 {"mac_rmac_err_cnt"},
318 {"xgxs_txgxs_err_cnt"},
319 {"xgxs_rxgxs_err_cnt"},
320 {"rc_err_cnt"},
321 {"prc_pcix_err_cnt"},
322 {"rpa_err_cnt"},
323 {"rda_err_cnt"},
324 {"rti_err_cnt"},
325 {"mc_err_cnt"}
326 };
327
328 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
331
332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
334
335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
337
338 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
339 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
340
341 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
342 init_timer(&timer); \
343 timer.function = handle; \
344 timer.data = (unsigned long)arg; \
345 mod_timer(&timer, (jiffies + exp))
346
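/*
 * A sketch of how the macro above is typically invoked; the handler
 * name and interval below are illustrative, not a statement about the
 * actual call sites:
 *
 *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */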
347 /* copy mac addr to def_mac_addr array */
348 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
349 {
350 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
351 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
352 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
353 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
354 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
355 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356 }
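/*
 * Worked example for the helper above: mac_addr = 0x001122334455ULL is
 * stored as mac_addr[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, i.e. the
 * most significant byte of the 48-bit value lands in byte 0.
 */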
357
358 /* Add the vlan */
359 static void s2io_vlan_rx_register(struct net_device *dev,
360 struct vlan_group *grp)
361 {
362 int i;
363 struct s2io_nic *nic = netdev_priv(dev);
364 unsigned long flags[MAX_TX_FIFOS];
365 struct config_param *config = &nic->config;
366 struct mac_info *mac_control = &nic->mac_control;
367
368 for (i = 0; i < config->tx_fifo_num; i++) {
369 struct fifo_info *fifo = &mac_control->fifos[i];
370
371 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
372 }
373
374 nic->vlgrp = grp;
375
376 for (i = config->tx_fifo_num - 1; i >= 0; i--) {
377 struct fifo_info *fifo = &mac_control->fifos[i];
378
379 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
380 }
381 }
382
383 /* Unregister the vlan */
384 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
385 {
386 int i;
387 struct s2io_nic *nic = netdev_priv(dev);
388 unsigned long flags[MAX_TX_FIFOS];
389 struct config_param *config = &nic->config;
390 struct mac_info *mac_control = &nic->mac_control;
391
392 for (i = 0; i < config->tx_fifo_num; i++) {
393 struct fifo_info *fifo = &mac_control->fifos[i];
394
395 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
396 }
397
398 if (nic->vlgrp)
399 vlan_group_set_device(nic->vlgrp, vid, NULL);
400
401 for (i = config->tx_fifo_num - 1; i >= 0; i--) {
402 struct fifo_info *fifo = &mac_control->fifos[i];
403
404 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
405 }
406 }
407
408 /*
409 * Constants to be programmed into the Xena's registers, to configure
410 * the XAUI.
411 */
412
413 #define END_SIGN 0x0
414 static const u64 herc_act_dtx_cfg[] = {
415 /* Set address */
416 0x8000051536750000ULL, 0x80000515367500E0ULL,
417 /* Write data */
418 0x8000051536750004ULL, 0x80000515367500E4ULL,
419 /* Set address */
420 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
421 /* Write data */
422 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
423 /* Set address */
424 0x801205150D440000ULL, 0x801205150D4400E0ULL,
425 /* Write data */
426 0x801205150D440004ULL, 0x801205150D4400E4ULL,
427 /* Set address */
428 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
429 /* Write data */
430 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
431 /* Done */
432 END_SIGN
433 };
434
435 static const u64 xena_dtx_cfg[] = {
436 /* Set address */
437 0x8000051500000000ULL, 0x80000515000000E0ULL,
438 /* Write data */
439 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
440 /* Set address */
441 0x8001051500000000ULL, 0x80010515000000E0ULL,
442 /* Write data */
443 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
444 /* Set address */
445 0x8002051500000000ULL, 0x80020515000000E0ULL,
446 /* Write data */
447 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
448 END_SIGN
449 };
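/*
 * Both tables above are consumed as flat sequences of register writes:
 * init_nic() below writes one entry at a time to the DTX control
 * register until the END_SIGN sentinel terminates the loop.
 */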
450
451 /*
452 * Constants for fixing the MAC address problem seen mostly on
453 * Alpha machines.
454 */
455 static const u64 fix_mac[] = {
456 0x0060000000000000ULL, 0x0060600000000000ULL,
457 0x0040600000000000ULL, 0x0000600000000000ULL,
458 0x0020600000000000ULL, 0x0060600000000000ULL,
459 0x0020600000000000ULL, 0x0060600000000000ULL,
460 0x0020600000000000ULL, 0x0060600000000000ULL,
461 0x0020600000000000ULL, 0x0060600000000000ULL,
462 0x0020600000000000ULL, 0x0060600000000000ULL,
463 0x0020600000000000ULL, 0x0060600000000000ULL,
464 0x0020600000000000ULL, 0x0060600000000000ULL,
465 0x0020600000000000ULL, 0x0060600000000000ULL,
466 0x0020600000000000ULL, 0x0060600000000000ULL,
467 0x0020600000000000ULL, 0x0060600000000000ULL,
468 0x0020600000000000ULL, 0x0000600000000000ULL,
469 0x0040600000000000ULL, 0x0060600000000000ULL,
470 END_SIGN
471 };
472
473 MODULE_LICENSE("GPL");
474 MODULE_VERSION(DRV_VERSION);
475
476
477 /* Module Loadable parameters. */
478 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
479 S2IO_PARM_INT(rx_ring_num, 1);
480 S2IO_PARM_INT(multiq, 0);
481 S2IO_PARM_INT(rx_ring_mode, 1);
482 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
483 S2IO_PARM_INT(rmac_pause_time, 0x100);
484 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
485 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
486 S2IO_PARM_INT(shared_splits, 0);
487 S2IO_PARM_INT(tmac_util_period, 5);
488 S2IO_PARM_INT(rmac_util_period, 5);
489 S2IO_PARM_INT(l3l4hdr_size, 128);
490 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
491 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
492 /* Frequency of Rx desc syncs expressed as power of 2 */
493 S2IO_PARM_INT(rxsync_frequency, 3);
494 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
495 S2IO_PARM_INT(intr_type, 2);
496 /* Large receive offload feature */
497
498 /* Max pkts to be aggregated by LRO at one time. If not specified,
499 * aggregation happens until we hit the max IP pkt size (64K)
500 */
501 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
502 S2IO_PARM_INT(indicate_max_pkts, 0);
503
504 S2IO_PARM_INT(napi, 1);
505 S2IO_PARM_INT(ufo, 0);
506 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
507
508 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
509 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
510 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
511 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
512 static unsigned int rts_frm_len[MAX_RX_RINGS] =
513 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
514
515 module_param_array(tx_fifo_len, uint, NULL, 0);
516 module_param_array(rx_ring_sz, uint, NULL, 0);
517 module_param_array(rts_frm_len, uint, NULL, 0);
518
519 /*
520 * S2IO device table.
521 * This table lists all the devices that this driver supports.
522 */
523 static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
524 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
525 PCI_ANY_ID, PCI_ANY_ID},
526 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
527 PCI_ANY_ID, PCI_ANY_ID},
528 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
529 PCI_ANY_ID, PCI_ANY_ID},
530 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
531 PCI_ANY_ID, PCI_ANY_ID},
532 {0,}
533 };
534
535 MODULE_DEVICE_TABLE(pci, s2io_tbl);
536
537 static struct pci_error_handlers s2io_err_handler = {
538 .error_detected = s2io_io_error_detected,
539 .slot_reset = s2io_io_slot_reset,
540 .resume = s2io_io_resume,
541 };
542
543 static struct pci_driver s2io_driver = {
544 .name = "S2IO",
545 .id_table = s2io_tbl,
546 .probe = s2io_init_nic,
547 .remove = __devexit_p(s2io_rem_nic),
548 .err_handler = &s2io_err_handler,
549 };
550
551 /* A helper macro used by both the init and free shared_mem functions. */
552 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
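/*
 * Worked example for the macro above (sizes illustrative, not the real
 * struct sizes): with lst_per_page = 16, a FIFO of 100 TxDLs needs
 * TXD_MEM_PAGE_CNT(100, 16) = (100 + 16 - 1) / 16 = 7 pages.
 */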
553
554 /* netqueue manipulation helper functions */
555 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
556 {
557 if (!sp->config.multiq) {
558 int i;
559
560 for (i = 0; i < sp->config.tx_fifo_num; i++)
561 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
562 }
563 netif_tx_stop_all_queues(sp->dev);
564 }
565
566 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
567 {
568 if (!sp->config.multiq)
569 sp->mac_control.fifos[fifo_no].queue_state =
570 FIFO_QUEUE_STOP;
571
572 netif_tx_stop_all_queues(sp->dev);
573 }
574
575 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
576 {
577 if (!sp->config.multiq) {
578 int i;
579
580 for (i = 0; i < sp->config.tx_fifo_num; i++)
581 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
582 }
583 netif_tx_start_all_queues(sp->dev);
584 }
585
586 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
587 {
588 if (!sp->config.multiq)
589 sp->mac_control.fifos[fifo_no].queue_state =
590 FIFO_QUEUE_START;
591
592 netif_tx_start_all_queues(sp->dev);
593 }
594
595 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
596 {
597 if (!sp->config.multiq) {
598 int i;
599
600 for (i = 0; i < sp->config.tx_fifo_num; i++)
601 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
602 }
603 netif_tx_wake_all_queues(sp->dev);
604 }
605
606 static inline void s2io_wake_tx_queue(
607 struct fifo_info *fifo, int cnt, u8 multiq)
608 {
609
610 if (multiq) {
611 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
612 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
613 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
614 if (netif_queue_stopped(fifo->dev)) {
615 fifo->queue_state = FIFO_QUEUE_START;
616 netif_wake_queue(fifo->dev);
617 }
618 }
619 }
620
621 /**
622 * init_shared_mem - Allocation and Initialization of Memory
623 * @nic: Device private variable.
624 * Description: The function allocates all the memory areas shared
625 * between the NIC and the driver. This includes Tx descriptors,
626 * Rx descriptors and the statistics block.
627 */
628
629 static int init_shared_mem(struct s2io_nic *nic)
630 {
631 u32 size;
632 void *tmp_v_addr, *tmp_v_addr_next;
633 dma_addr_t tmp_p_addr, tmp_p_addr_next;
634 struct RxD_block *pre_rxd_blk = NULL;
635 int i, j, blk_cnt;
636 int lst_size, lst_per_page;
637 struct net_device *dev = nic->dev;
638 unsigned long tmp;
639 struct buffAdd *ba;
640 struct config_param *config = &nic->config;
641 struct mac_info *mac_control = &nic->mac_control;
642 unsigned long long mem_allocated = 0;
643
644 /* Allocation and initialization of TXDLs in FIFOs */
645 size = 0;
646 for (i = 0; i < config->tx_fifo_num; i++) {
647 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
648
649 size += tx_cfg->fifo_len;
650 }
651 if (size > MAX_AVAILABLE_TXDS) {
652 DBG_PRINT(ERR_DBG,
653 "Too many TxDs requested: %d, max supported: %d\n",
654 size, MAX_AVAILABLE_TXDS);
655 return -EINVAL;
656 }
657
658 size = 0;
659 for (i = 0; i < config->tx_fifo_num; i++) {
660 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
661
662 size = tx_cfg->fifo_len;
663 /*
664 * Legal values are from 2 to 8192
665 */
666 if (size < 2) {
667 DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
668 "Valid lengths are 2 through 8192\n",
669 i, size);
670 return -EINVAL;
671 }
672 }
673
674 lst_size = (sizeof(struct TxD) * config->max_txds);
675 lst_per_page = PAGE_SIZE / lst_size;
676
677 for (i = 0; i < config->tx_fifo_num; i++) {
678 struct fifo_info *fifo = &mac_control->fifos[i];
679 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
680 int fifo_len = tx_cfg->fifo_len;
681 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
682
683 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
684 if (!fifo->list_info) {
685 DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
686 return -ENOMEM;
687 }
688 mem_allocated += list_holder_size;
689 }
690 for (i = 0; i < config->tx_fifo_num; i++) {
691 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
692 lst_per_page);
693 struct fifo_info *fifo = &mac_control->fifos[i];
694 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
695
696 fifo->tx_curr_put_info.offset = 0;
697 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
698 fifo->tx_curr_get_info.offset = 0;
699 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
700 fifo->fifo_no = i;
701 fifo->nic = nic;
702 fifo->max_txds = MAX_SKB_FRAGS + 2;
703 fifo->dev = dev;
704
705 for (j = 0; j < page_num; j++) {
706 int k = 0;
707 dma_addr_t tmp_p;
708 void *tmp_v;
709 tmp_v = pci_alloc_consistent(nic->pdev,
710 PAGE_SIZE, &tmp_p);
711 if (!tmp_v) {
712 DBG_PRINT(INFO_DBG,
713 "pci_alloc_consistent failed for TxDL\n");
714 return -ENOMEM;
715 }
716 /* If we got a zero DMA address (can happen on
717 * certain platforms like PPC), reallocate.
718 * Store the virtual address of the page we don't
719 * want, to be freed later.
720 */
721 if (!tmp_p) {
722 mac_control->zerodma_virt_addr = tmp_v;
723 DBG_PRINT(INIT_DBG,
724 "%s: Zero DMA address for TxDL. "
725 "Virtual address %p\n",
726 dev->name, tmp_v);
727 tmp_v = pci_alloc_consistent(nic->pdev,
728 PAGE_SIZE, &tmp_p);
729 if (!tmp_v) {
730 DBG_PRINT(INFO_DBG,
731 "pci_alloc_consistent failed for TxDL\n");
732 return -ENOMEM;
733 }
734 mem_allocated += PAGE_SIZE;
735 }
736 while (k < lst_per_page) {
737 int l = (j * lst_per_page) + k;
738 if (l == tx_cfg->fifo_len)
739 break;
740 fifo->list_info[l].list_virt_addr =
741 tmp_v + (k * lst_size);
742 fifo->list_info[l].list_phy_addr =
743 tmp_p + (k * lst_size);
744 k++;
745 }
746 }
747 }
748
749 for (i = 0; i < config->tx_fifo_num; i++) {
750 struct fifo_info *fifo = &mac_control->fifos[i];
751 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
752
753 size = tx_cfg->fifo_len;
754 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
755 if (!fifo->ufo_in_band_v)
756 return -ENOMEM;
757 mem_allocated += (size * sizeof(u64));
758 }
759
760 /* Allocation and initialization of RXDs in Rings */
761 size = 0;
762 for (i = 0; i < config->rx_ring_num; i++) {
763 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
764 struct ring_info *ring = &mac_control->rings[i];
765
766 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
767 DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
768 "multiple of RxDs per Block\n",
769 dev->name, i);
770 return FAILURE;
771 }
772 size += rx_cfg->num_rxd;
773 ring->block_count = rx_cfg->num_rxd /
774 (rxd_count[nic->rxd_mode] + 1);
775 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
776 }
777 if (nic->rxd_mode == RXD_MODE_1)
778 size = (size * (sizeof(struct RxD1)));
779 else
780 size = (size * (sizeof(struct RxD3)));
781
782 for (i = 0; i < config->rx_ring_num; i++) {
783 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
784 struct ring_info *ring = &mac_control->rings[i];
785
786 ring->rx_curr_get_info.block_index = 0;
787 ring->rx_curr_get_info.offset = 0;
788 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
789 ring->rx_curr_put_info.block_index = 0;
790 ring->rx_curr_put_info.offset = 0;
791 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
792 ring->nic = nic;
793 ring->ring_no = i;
794
795 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
796 /* Allocating all the Rx blocks */
797 for (j = 0; j < blk_cnt; j++) {
798 struct rx_block_info *rx_blocks;
799 int l;
800
801 rx_blocks = &ring->rx_blocks[j];
802 size = SIZE_OF_BLOCK; /* size is always page size */
803 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
804 &tmp_p_addr);
805 if (tmp_v_addr == NULL) {
806 /*
807 * In case of failure, free_shared_mem()
808 * is called, which should free any
809 * memory that was allocated before the
810 * failure happened.
811 */
812 rx_blocks->block_virt_addr = tmp_v_addr;
813 return -ENOMEM;
814 }
815 mem_allocated += size;
816 memset(tmp_v_addr, 0, size);
817
818 size = sizeof(struct rxd_info) *
819 rxd_count[nic->rxd_mode];
820 rx_blocks->block_virt_addr = tmp_v_addr;
821 rx_blocks->block_dma_addr = tmp_p_addr;
822 rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
823 if (!rx_blocks->rxds)
824 return -ENOMEM;
825 mem_allocated += size;
826 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
827 rx_blocks->rxds[l].virt_addr =
828 rx_blocks->block_virt_addr +
829 (rxd_size[nic->rxd_mode] * l);
830 rx_blocks->rxds[l].dma_addr =
831 rx_blocks->block_dma_addr +
832 (rxd_size[nic->rxd_mode] * l);
833 }
834 }
835 /* Interlinking all Rx Blocks */
836 for (j = 0; j < blk_cnt; j++) {
837 int next = (j + 1) % blk_cnt;
838 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
839 tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
840 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
841 tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
842
843 pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
844 pre_rxd_blk->reserved_2_pNext_RxD_block =
845 (unsigned long)tmp_v_addr_next;
846 pre_rxd_blk->pNext_RxD_Blk_physical =
847 (u64)tmp_p_addr_next;
848 }
849 }
850 if (nic->rxd_mode == RXD_MODE_3B) {
851 /*
852 * Allocation of storage for the buffer addresses in 2BUFF mode,
853 * and of the buffers as well.
854 */
855 for (i = 0; i < config->rx_ring_num; i++) {
856 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
857 struct ring_info *ring = &mac_control->rings[i];
858
859 blk_cnt = rx_cfg->num_rxd /
860 (rxd_count[nic->rxd_mode] + 1);
861 size = sizeof(struct buffAdd *) * blk_cnt;
862 ring->ba = kmalloc(size, GFP_KERNEL);
863 if (!ring->ba)
864 return -ENOMEM;
865 mem_allocated += size;
866 for (j = 0; j < blk_cnt; j++) {
867 int k = 0;
868
869 size = sizeof(struct buffAdd) *
870 (rxd_count[nic->rxd_mode] + 1);
871 ring->ba[j] = kmalloc(size, GFP_KERNEL);
872 if (!ring->ba[j])
873 return -ENOMEM;
874 mem_allocated += size;
875 while (k != rxd_count[nic->rxd_mode]) {
876 ba = &ring->ba[j][k];
877 size = BUF0_LEN + ALIGN_SIZE;
878 ba->ba_0_org = kmalloc(size, GFP_KERNEL);
879 if (!ba->ba_0_org)
880 return -ENOMEM;
881 mem_allocated += size;
882 tmp = (unsigned long)ba->ba_0_org;
883 tmp += ALIGN_SIZE;
884 tmp &= ~((unsigned long)ALIGN_SIZE);
885 ba->ba_0 = (void *)tmp;
886
887 size = BUF1_LEN + ALIGN_SIZE;
888 ba->ba_1_org = kmalloc(size, GFP_KERNEL);
889 if (!ba->ba_1_org)
890 return -ENOMEM;
891 mem_allocated += size;
892 tmp = (unsigned long)ba->ba_1_org;
893 tmp += ALIGN_SIZE;
894 tmp &= ~((unsigned long)ALIGN_SIZE);
895 ba->ba_1 = (void *)tmp;
896 k++;
897 }
898 }
899 }
900 }
901
902 /* Allocation and initialization of Statistics block */
903 size = sizeof(struct stat_block);
904 mac_control->stats_mem =
905 pci_alloc_consistent(nic->pdev, size,
906 &mac_control->stats_mem_phy);
907
908 if (!mac_control->stats_mem) {
909 /*
910 * In case of failure, free_shared_mem() is called, which
911 * should free any memory that was allocated before the
912 * failure happened.
913 */
914 return -ENOMEM;
915 }
916 mem_allocated += size;
917 mac_control->stats_mem_sz = size;
918
919 tmp_v_addr = mac_control->stats_mem;
920 mac_control->stats_info = (struct stat_block *)tmp_v_addr;
921 memset(tmp_v_addr, 0, size);
922 DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
923 dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
924 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
925 return SUCCESS;
926 }
927
928 /**
929 * free_shared_mem - Free the allocated Memory
930 * @nic: Device private variable.
931 * Description: This function frees all memory allocated by the
932 * init_shared_mem() function and returns it to the kernel.
933 */
934
935 static void free_shared_mem(struct s2io_nic *nic)
936 {
937 int i, j, blk_cnt, size;
938 void *tmp_v_addr;
939 dma_addr_t tmp_p_addr;
940 int lst_size, lst_per_page;
941 struct net_device *dev;
942 int page_num = 0;
943 struct config_param *config;
944 struct mac_info *mac_control;
945 struct stat_block *stats;
946 struct swStat *swstats;
947
948 if (!nic)
949 return;
950
951 dev = nic->dev;
952
953 config = &nic->config;
954 mac_control = &nic->mac_control;
955 stats = mac_control->stats_info;
956 swstats = &stats->sw_stat;
957
958 lst_size = sizeof(struct TxD) * config->max_txds;
959 lst_per_page = PAGE_SIZE / lst_size;
960
961 for (i = 0; i < config->tx_fifo_num; i++) {
962 struct fifo_info *fifo = &mac_control->fifos[i];
963 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
964
965 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
966 for (j = 0; j < page_num; j++) {
967 int mem_blks = (j * lst_per_page);
968 struct list_info_hold *fli;
969
970 if (!fifo->list_info)
971 return;
972
973 fli = &fifo->list_info[mem_blks];
974 if (!fli->list_virt_addr)
975 break;
976 pci_free_consistent(nic->pdev, PAGE_SIZE,
977 fli->list_virt_addr,
978 fli->list_phy_addr);
979 swstats->mem_freed += PAGE_SIZE;
980 }
981 /* If we got a zero DMA address during allocation,
982 * free the page now
983 */
984 if (mac_control->zerodma_virt_addr) {
985 pci_free_consistent(nic->pdev, PAGE_SIZE,
986 mac_control->zerodma_virt_addr,
987 (dma_addr_t)0);
988 DBG_PRINT(INIT_DBG,
989 "%s: Freeing TxDL with zero DMA address. "
990 "Virtual address %p\n",
991 dev->name, mac_control->zerodma_virt_addr);
992 swstats->mem_freed += PAGE_SIZE;
993 }
994 kfree(fifo->list_info);
995 swstats->mem_freed += tx_cfg->fifo_len *
996 sizeof(struct list_info_hold);
997 }
998
999 size = SIZE_OF_BLOCK;
1000 for (i = 0; i < config->rx_ring_num; i++) {
1001 struct ring_info *ring = &mac_control->rings[i];
1002
1003 blk_cnt = ring->block_count;
1004 for (j = 0; j < blk_cnt; j++) {
1005 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
1006 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
1007 if (tmp_v_addr == NULL)
1008 break;
1009 pci_free_consistent(nic->pdev, size,
1010 tmp_v_addr, tmp_p_addr);
1011 swstats->mem_freed += size;
1012 kfree(ring->rx_blocks[j].rxds);
1013 swstats->mem_freed += sizeof(struct rxd_info) *
1014 rxd_count[nic->rxd_mode];
1015 }
1016 }
1017
1018 if (nic->rxd_mode == RXD_MODE_3B) {
1019 /* Freeing buffer storage addresses in 2BUFF mode. */
1020 for (i = 0; i < config->rx_ring_num; i++) {
1021 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1022 struct ring_info *ring = &mac_control->rings[i];
1023
1024 blk_cnt = rx_cfg->num_rxd /
1025 (rxd_count[nic->rxd_mode] + 1);
1026 for (j = 0; j < blk_cnt; j++) {
1027 int k = 0;
1028 if (!ring->ba[j])
1029 continue;
1030 while (k != rxd_count[nic->rxd_mode]) {
1031 struct buffAdd *ba = &ring->ba[j][k];
1032 kfree(ba->ba_0_org);
1033 swstats->mem_freed +=
1034 BUF0_LEN + ALIGN_SIZE;
1035 kfree(ba->ba_1_org);
1036 swstats->mem_freed +=
1037 BUF1_LEN + ALIGN_SIZE;
1038 k++;
1039 }
1040 kfree(ring->ba[j]);
1041 swstats->mem_freed += sizeof(struct buffAdd) *
1042 (rxd_count[nic->rxd_mode] + 1);
1043 }
1044 kfree(ring->ba);
1045 swstats->mem_freed += sizeof(struct buffAdd *) *
1046 blk_cnt;
1047 }
1048 }
1049
1050 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1051 struct fifo_info *fifo = &mac_control->fifos[i];
1052 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1053
1054 if (fifo->ufo_in_band_v) {
1055 swstats->mem_freed += tx_cfg->fifo_len *
1056 sizeof(u64);
1057 kfree(fifo->ufo_in_band_v);
1058 }
1059 }
1060
1061 if (mac_control->stats_mem) {
1062 swstats->mem_freed += mac_control->stats_mem_sz;
1063 pci_free_consistent(nic->pdev,
1064 mac_control->stats_mem_sz,
1065 mac_control->stats_mem,
1066 mac_control->stats_mem_phy);
1067 }
1068 }
1069
1070 /**
1071 * s2io_verify_pci_mode - Return the PCI/PCI-X bus mode of the adapter, or -1 if unknown.
1072 */
1073
1074 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1075 {
1076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1077 register u64 val64 = 0;
1078 int mode;
1079
1080 val64 = readq(&bar0->pci_mode);
1081 mode = (u8)GET_PCI_MODE(val64);
1082
1083 if (val64 & PCI_MODE_UNKNOWN_MODE)
1084 return -1; /* Unknown PCI mode */
1085 return mode;
1086 }
1087
1088 #define NEC_VENID 0x1033
1089 #define NEC_DEVID 0x0125
1090 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1091 {
1092 struct pci_dev *tdev = NULL;
1093 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1094 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1095 if (tdev->bus == s2io_pdev->bus->parent) {
1096 pci_dev_put(tdev);
1097 return 1;
1098 }
1099 }
1100 }
1101 return 0;
1102 }
1103
1104 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1105 /**
1106 * s2io_print_pci_mode - Print and return the PCI/PCI-X bus mode; -1 if unknown or unsupported.
1107 */
1108 static int s2io_print_pci_mode(struct s2io_nic *nic)
1109 {
1110 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1111 register u64 val64 = 0;
1112 int mode;
1113 struct config_param *config = &nic->config;
1114 const char *pcimode;
1115
1116 val64 = readq(&bar0->pci_mode);
1117 mode = (u8)GET_PCI_MODE(val64);
1118
1119 if (val64 & PCI_MODE_UNKNOWN_MODE)
1120 return -1; /* Unknown PCI mode */
1121
1122 config->bus_speed = bus_speed[mode];
1123
1124 if (s2io_on_nec_bridge(nic->pdev)) {
1125 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1126 nic->dev->name);
1127 return mode;
1128 }
1129
1130 switch (mode) {
1131 case PCI_MODE_PCI_33:
1132 pcimode = "33MHz PCI bus";
1133 break;
1134 case PCI_MODE_PCI_66:
1135 pcimode = "66MHz PCI bus";
1136 break;
1137 case PCI_MODE_PCIX_M1_66:
1138 pcimode = "66MHz PCIX(M1) bus";
1139 break;
1140 case PCI_MODE_PCIX_M1_100:
1141 pcimode = "100MHz PCIX(M1) bus";
1142 break;
1143 case PCI_MODE_PCIX_M1_133:
1144 pcimode = "133MHz PCIX(M1) bus";
1145 break;
1146 case PCI_MODE_PCIX_M2_66:
1147 pcimode = "133MHz PCIX(M2) bus";
1148 break;
1149 case PCI_MODE_PCIX_M2_100:
1150 pcimode = "200MHz PCIX(M2) bus";
1151 break;
1152 case PCI_MODE_PCIX_M2_133:
1153 pcimode = "266MHz PCIX(M2) bus";
1154 break;
1155 default:
1156 pcimode = "unsupported bus!";
1157 mode = -1;
1158 }
1159
1160 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1161 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1162
1163 return mode;
1164 }
1165
1166 /**
1167 * init_tti - Initialize the transmit traffic interrupt scheme
1168 * @nic: device private variable
1169 * @link: link status (UP/DOWN) used to enable/disable continuous
1170 * transmit interrupts
1171 * Description: The function configures transmit traffic interrupts
1172 * Return Value: SUCCESS on success and
1173 * '-1' on failure
1174 */
1175
1176 static int init_tti(struct s2io_nic *nic, int link)
1177 {
1178 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1179 register u64 val64 = 0;
1180 int i;
1181 struct config_param *config = &nic->config;
1182
1183 for (i = 0; i < config->tx_fifo_num; i++) {
1184 /*
1185 * TTI Initialization. Default Tx timer gets us about
1186 * 250 interrupts per sec. Continuous interrupts are enabled
1187 * by default.
1188 */
1189 if (nic->device_type == XFRAME_II_DEVICE) {
1190 int count = (nic->config.bus_speed * 125)/2;
1191 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1192 } else
1193 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
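		/*
		 * Worked example for the XFRAME_II branch above (assuming
		 * bus_speed = 266, i.e. a 266MHz PCI-X M2 bus):
		 * count = (266 * 125) / 2 = 16625, the Tx timer value loaded.
		 */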
1194
1195 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1196 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1197 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1198 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1199 if (i == 0)
1200 if (use_continuous_tx_intrs && (link == LINK_UP))
1201 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1202 writeq(val64, &bar0->tti_data1_mem);
1203
1204 if (nic->config.intr_type == MSI_X) {
1205 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1206 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1207 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1208 TTI_DATA2_MEM_TX_UFC_D(0x300);
1209 } else {
1210 if ((nic->config.tx_steering_type ==
1211 TX_DEFAULT_STEERING) &&
1212 (config->tx_fifo_num > 1) &&
1213 (i >= nic->udp_fifo_idx) &&
1214 (i < (nic->udp_fifo_idx +
1215 nic->total_udp_fifos)))
1216 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1217 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1218 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1219 TTI_DATA2_MEM_TX_UFC_D(0x120);
1220 else
1221 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1222 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1223 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1224 TTI_DATA2_MEM_TX_UFC_D(0x80);
1225 }
1226
1227 writeq(val64, &bar0->tti_data2_mem);
1228
1229 val64 = TTI_CMD_MEM_WE |
1230 TTI_CMD_MEM_STROBE_NEW_CMD |
1231 TTI_CMD_MEM_OFFSET(i);
1232 writeq(val64, &bar0->tti_command_mem);
1233
1234 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1235 TTI_CMD_MEM_STROBE_NEW_CMD,
1236 S2IO_BIT_RESET) != SUCCESS)
1237 return FAILURE;
1238 }
1239
1240 return SUCCESS;
1241 }
1242
1243 /**
1244 * init_nic - Initialization of hardware
1245 * @nic: device private variable
1246 * Description: The function sequentially configures every block
1247 * of the H/W from their reset values.
1248 * Return Value: SUCCESS on success and
1249 * '-1' on failure (endian settings incorrect).
1250 */
1251
1252 static int init_nic(struct s2io_nic *nic)
1253 {
1254 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1255 struct net_device *dev = nic->dev;
1256 register u64 val64 = 0;
1257 void __iomem *add;
1258 u32 time;
1259 int i, j;
1260 int dtx_cnt = 0;
1261 unsigned long long mem_share;
1262 int mem_size;
1263 struct config_param *config = &nic->config;
1264 struct mac_info *mac_control = &nic->mac_control;
1265
1266 /* to set the swapper control on the card */
1267 if (s2io_set_swapper(nic)) {
1268 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1269 return -EIO;
1270 }
1271
1272 /*
1273 * Herc requires EOI to be removed from reset before XGXS, so do that first.
1274 */
1275 if (nic->device_type & XFRAME_II_DEVICE) {
1276 val64 = 0xA500000000ULL;
1277 writeq(val64, &bar0->sw_reset);
1278 msleep(500);
1279 val64 = readq(&bar0->sw_reset);
1280 }
1281
1282 /* Remove XGXS from reset state */
1283 val64 = 0;
1284 writeq(val64, &bar0->sw_reset);
1285 msleep(500);
1286 val64 = readq(&bar0->sw_reset);
1287
1288 /* Ensure that it's safe to access registers by checking
1289 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1290 */
1291 if (nic->device_type == XFRAME_II_DEVICE) {
1292 for (i = 0; i < 50; i++) {
1293 val64 = readq(&bar0->adapter_status);
1294 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1295 break;
1296 msleep(10);
1297 }
1298 if (i == 50)
1299 return -ENODEV;
1300 }
1301
1302 /* Enable Receiving broadcasts */
1303 add = &bar0->mac_cfg;
1304 val64 = readq(&bar0->mac_cfg);
1305 val64 |= MAC_RMAC_BCAST_ENABLE;
1306 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1307 writel((u32)val64, add);
1308 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1309 writel((u32) (val64 >> 32), (add + 4));
1310
1311 /* Read registers in all blocks */
1312 val64 = readq(&bar0->mac_int_mask);
1313 val64 = readq(&bar0->mc_int_mask);
1314 val64 = readq(&bar0->xgxs_int_mask);
1315
1316 /* Set MTU */
1317 val64 = dev->mtu;
1318 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1319
1320 if (nic->device_type & XFRAME_II_DEVICE) {
1321 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1322 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1323 &bar0->dtx_control, UF);
1324 if (dtx_cnt & 0x1)
1325 msleep(1); /* Necessary!! */
1326 dtx_cnt++;
1327 }
1328 } else {
1329 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1330 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1331 &bar0->dtx_control, UF);
1332 val64 = readq(&bar0->dtx_control);
1333 dtx_cnt++;
1334 }
1335 }
1336
1337 /* Tx DMA Initialization */
1338 val64 = 0;
1339 writeq(val64, &bar0->tx_fifo_partition_0);
1340 writeq(val64, &bar0->tx_fifo_partition_1);
1341 writeq(val64, &bar0->tx_fifo_partition_2);
1342 writeq(val64, &bar0->tx_fifo_partition_3);
1343
1344 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1345 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1346
1347 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1348 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1349
1350 if (i == (config->tx_fifo_num - 1)) {
1351 if (i % 2 == 0)
1352 i++;
1353 }
1354
1355 switch (i) {
1356 case 1:
1357 writeq(val64, &bar0->tx_fifo_partition_0);
1358 val64 = 0;
1359 j = 0;
1360 break;
1361 case 3:
1362 writeq(val64, &bar0->tx_fifo_partition_1);
1363 val64 = 0;
1364 j = 0;
1365 break;
1366 case 5:
1367 writeq(val64, &bar0->tx_fifo_partition_2);
1368 val64 = 0;
1369 j = 0;
1370 break;
1371 case 7:
1372 writeq(val64, &bar0->tx_fifo_partition_3);
1373 val64 = 0;
1374 j = 0;
1375 break;
1376 default:
1377 j++;
1378 break;
1379 }
1380 }
1381
1382 /*
1383 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1384 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1385 */
1386 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1387 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1388
1389 val64 = readq(&bar0->tx_fifo_partition_0);
1390 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1391 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1392
1393 /*
1394 * Initialization of Tx_PA_CONFIG register to ignore packet
1395 * integrity checking.
1396 */
1397 val64 = readq(&bar0->tx_pa_cfg);
1398 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1399 TX_PA_CFG_IGNORE_SNAP_OUI |
1400 TX_PA_CFG_IGNORE_LLC_CTRL |
1401 TX_PA_CFG_IGNORE_L2_ERR;
1402 writeq(val64, &bar0->tx_pa_cfg);
1403
1404 /* Rx DMA initialization. */
1405 val64 = 0;
1406 for (i = 0; i < config->rx_ring_num; i++) {
1407 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1408
1409 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1410 }
1411 writeq(val64, &bar0->rx_queue_priority);
1412
1413 /*
1414 * Allocating equal share of memory to all the
1415 * configured Rings.
1416 */
1417 val64 = 0;
1418 if (nic->device_type & XFRAME_II_DEVICE)
1419 mem_size = 32;
1420 else
1421 mem_size = 64;
1422
1423 for (i = 0; i < config->rx_ring_num; i++) {
1424 switch (i) {
1425 case 0:
1426 mem_share = (mem_size / config->rx_ring_num +
1427 mem_size % config->rx_ring_num);
1428 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1429 continue;
1430 case 1:
1431 mem_share = (mem_size / config->rx_ring_num);
1432 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1433 continue;
1434 case 2:
1435 mem_share = (mem_size / config->rx_ring_num);
1436 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1437 continue;
1438 case 3:
1439 mem_share = (mem_size / config->rx_ring_num);
1440 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1441 continue;
1442 case 4:
1443 mem_share = (mem_size / config->rx_ring_num);
1444 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1445 continue;
1446 case 5:
1447 mem_share = (mem_size / config->rx_ring_num);
1448 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1449 continue;
1450 case 6:
1451 mem_share = (mem_size / config->rx_ring_num);
1452 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1453 continue;
1454 case 7:
1455 mem_share = (mem_size / config->rx_ring_num);
1456 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1457 continue;
1458 }
1459 }
1460 writeq(val64, &bar0->rx_queue_cfg);
1461
1462 /*
1463 * Filling Tx round robin registers
1464 * as per the number of FIFOs for equal scheduling priority
1465 */
1466 switch (config->tx_fifo_num) {
1467 case 1:
1468 val64 = 0x0;
1469 writeq(val64, &bar0->tx_w_round_robin_0);
1470 writeq(val64, &bar0->tx_w_round_robin_1);
1471 writeq(val64, &bar0->tx_w_round_robin_2);
1472 writeq(val64, &bar0->tx_w_round_robin_3);
1473 writeq(val64, &bar0->tx_w_round_robin_4);
1474 break;
1475 case 2:
1476 val64 = 0x0001000100010001ULL;
1477 writeq(val64, &bar0->tx_w_round_robin_0);
1478 writeq(val64, &bar0->tx_w_round_robin_1);
1479 writeq(val64, &bar0->tx_w_round_robin_2);
1480 writeq(val64, &bar0->tx_w_round_robin_3);
1481 val64 = 0x0001000100000000ULL;
1482 writeq(val64, &bar0->tx_w_round_robin_4);
1483 break;
1484 case 3:
1485 val64 = 0x0001020001020001ULL;
1486 writeq(val64, &bar0->tx_w_round_robin_0);
1487 val64 = 0x0200010200010200ULL;
1488 writeq(val64, &bar0->tx_w_round_robin_1);
1489 val64 = 0x0102000102000102ULL;
1490 writeq(val64, &bar0->tx_w_round_robin_2);
1491 val64 = 0x0001020001020001ULL;
1492 writeq(val64, &bar0->tx_w_round_robin_3);
1493 val64 = 0x0200010200000000ULL;
1494 writeq(val64, &bar0->tx_w_round_robin_4);
1495 break;
1496 case 4:
1497 val64 = 0x0001020300010203ULL;
1498 writeq(val64, &bar0->tx_w_round_robin_0);
1499 writeq(val64, &bar0->tx_w_round_robin_1);
1500 writeq(val64, &bar0->tx_w_round_robin_2);
1501 writeq(val64, &bar0->tx_w_round_robin_3);
1502 val64 = 0x0001020300000000ULL;
1503 writeq(val64, &bar0->tx_w_round_robin_4);
1504 break;
1505 case 5:
1506 val64 = 0x0001020304000102ULL;
1507 writeq(val64, &bar0->tx_w_round_robin_0);
1508 val64 = 0x0304000102030400ULL;
1509 writeq(val64, &bar0->tx_w_round_robin_1);
1510 val64 = 0x0102030400010203ULL;
1511 writeq(val64, &bar0->tx_w_round_robin_2);
1512 val64 = 0x0400010203040001ULL;
1513 writeq(val64, &bar0->tx_w_round_robin_3);
1514 val64 = 0x0203040000000000ULL;
1515 writeq(val64, &bar0->tx_w_round_robin_4);
1516 break;
1517 case 6:
1518 val64 = 0x0001020304050001ULL;
1519 writeq(val64, &bar0->tx_w_round_robin_0);
1520 val64 = 0x0203040500010203ULL;
1521 writeq(val64, &bar0->tx_w_round_robin_1);
1522 val64 = 0x0405000102030405ULL;
1523 writeq(val64, &bar0->tx_w_round_robin_2);
1524 val64 = 0x0001020304050001ULL;
1525 writeq(val64, &bar0->tx_w_round_robin_3);
1526 val64 = 0x0203040500000000ULL;
1527 writeq(val64, &bar0->tx_w_round_robin_4);
1528 break;
1529 case 7:
1530 val64 = 0x0001020304050600ULL;
1531 writeq(val64, &bar0->tx_w_round_robin_0);
1532 val64 = 0x0102030405060001ULL;
1533 writeq(val64, &bar0->tx_w_round_robin_1);
1534 val64 = 0x0203040506000102ULL;
1535 writeq(val64, &bar0->tx_w_round_robin_2);
1536 val64 = 0x0304050600010203ULL;
1537 writeq(val64, &bar0->tx_w_round_robin_3);
1538 val64 = 0x0405060000000000ULL;
1539 writeq(val64, &bar0->tx_w_round_robin_4);
1540 break;
1541 case 8:
1542 val64 = 0x0001020304050607ULL;
1543 writeq(val64, &bar0->tx_w_round_robin_0);
1544 writeq(val64, &bar0->tx_w_round_robin_1);
1545 writeq(val64, &bar0->tx_w_round_robin_2);
1546 writeq(val64, &bar0->tx_w_round_robin_3);
1547 val64 = 0x0001020300000000ULL;
1548 writeq(val64, &bar0->tx_w_round_robin_4);
1549 break;
1550 }
1551
1552 /* Enable all configured Tx FIFO partitions */
1553 val64 = readq(&bar0->tx_fifo_partition_0);
1554 val64 |= (TX_FIFO_PARTITION_EN);
1555 writeq(val64, &bar0->tx_fifo_partition_0);
1556
1557 /* Filling the Rx round robin registers as per the
1558 * number of Rings and steering based on QoS with
1559 * equal priority.
1560 */
1561 switch (config->rx_ring_num) {
1562 case 1:
1563 val64 = 0x0;
1564 writeq(val64, &bar0->rx_w_round_robin_0);
1565 writeq(val64, &bar0->rx_w_round_robin_1);
1566 writeq(val64, &bar0->rx_w_round_robin_2);
1567 writeq(val64, &bar0->rx_w_round_robin_3);
1568 writeq(val64, &bar0->rx_w_round_robin_4);
1569
1570 val64 = 0x8080808080808080ULL;
1571 writeq(val64, &bar0->rts_qos_steering);
1572 break;
1573 case 2:
1574 val64 = 0x0001000100010001ULL;
1575 writeq(val64, &bar0->rx_w_round_robin_0);
1576 writeq(val64, &bar0->rx_w_round_robin_1);
1577 writeq(val64, &bar0->rx_w_round_robin_2);
1578 writeq(val64, &bar0->rx_w_round_robin_3);
1579 val64 = 0x0001000100000000ULL;
1580 writeq(val64, &bar0->rx_w_round_robin_4);
1581
1582 val64 = 0x8080808040404040ULL;
1583 writeq(val64, &bar0->rts_qos_steering);
1584 break;
1585 case 3:
1586 val64 = 0x0001020001020001ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_0);
1588 val64 = 0x0200010200010200ULL;
1589 writeq(val64, &bar0->rx_w_round_robin_1);
1590 val64 = 0x0102000102000102ULL;
1591 writeq(val64, &bar0->rx_w_round_robin_2);
1592 val64 = 0x0001020001020001ULL;
1593 writeq(val64, &bar0->rx_w_round_robin_3);
1594 val64 = 0x0200010200000000ULL;
1595 writeq(val64, &bar0->rx_w_round_robin_4);
1596
1597 val64 = 0x8080804040402020ULL;
1598 writeq(val64, &bar0->rts_qos_steering);
1599 break;
1600 case 4:
1601 val64 = 0x0001020300010203ULL;
1602 writeq(val64, &bar0->rx_w_round_robin_0);
1603 writeq(val64, &bar0->rx_w_round_robin_1);
1604 writeq(val64, &bar0->rx_w_round_robin_2);
1605 writeq(val64, &bar0->rx_w_round_robin_3);
1606 val64 = 0x0001020300000000ULL;
1607 writeq(val64, &bar0->rx_w_round_robin_4);
1608
1609 val64 = 0x8080404020201010ULL;
1610 writeq(val64, &bar0->rts_qos_steering);
1611 break;
1612 case 5:
1613 val64 = 0x0001020304000102ULL;
1614 writeq(val64, &bar0->rx_w_round_robin_0);
1615 val64 = 0x0304000102030400ULL;
1616 writeq(val64, &bar0->rx_w_round_robin_1);
1617 val64 = 0x0102030400010203ULL;
1618 writeq(val64, &bar0->rx_w_round_robin_2);
1619 val64 = 0x0400010203040001ULL;
1620 writeq(val64, &bar0->rx_w_round_robin_3);
1621 val64 = 0x0203040000000000ULL;
1622 writeq(val64, &bar0->rx_w_round_robin_4);
1623
1624 val64 = 0x8080404020201008ULL;
1625 writeq(val64, &bar0->rts_qos_steering);
1626 break;
1627 case 6:
1628 val64 = 0x0001020304050001ULL;
1629 writeq(val64, &bar0->rx_w_round_robin_0);
1630 val64 = 0x0203040500010203ULL;
1631 writeq(val64, &bar0->rx_w_round_robin_1);
1632 val64 = 0x0405000102030405ULL;
1633 writeq(val64, &bar0->rx_w_round_robin_2);
1634 val64 = 0x0001020304050001ULL;
1635 writeq(val64, &bar0->rx_w_round_robin_3);
1636 val64 = 0x0203040500000000ULL;
1637 writeq(val64, &bar0->rx_w_round_robin_4);
1638
1639 val64 = 0x8080404020100804ULL;
1640 writeq(val64, &bar0->rts_qos_steering);
1641 break;
1642 case 7:
1643 val64 = 0x0001020304050600ULL;
1644 writeq(val64, &bar0->rx_w_round_robin_0);
1645 val64 = 0x0102030405060001ULL;
1646 writeq(val64, &bar0->rx_w_round_robin_1);
1647 val64 = 0x0203040506000102ULL;
1648 writeq(val64, &bar0->rx_w_round_robin_2);
1649 val64 = 0x0304050600010203ULL;
1650 writeq(val64, &bar0->rx_w_round_robin_3);
1651 val64 = 0x0405060000000000ULL;
1652 writeq(val64, &bar0->rx_w_round_robin_4);
1653
1654 val64 = 0x8080402010080402ULL;
1655 writeq(val64, &bar0->rts_qos_steering);
1656 break;
1657 case 8:
1658 val64 = 0x0001020304050607ULL;
1659 writeq(val64, &bar0->rx_w_round_robin_0);
1660 writeq(val64, &bar0->rx_w_round_robin_1);
1661 writeq(val64, &bar0->rx_w_round_robin_2);
1662 writeq(val64, &bar0->rx_w_round_robin_3);
1663 val64 = 0x0001020300000000ULL;
1664 writeq(val64, &bar0->rx_w_round_robin_4);
1665
1666 val64 = 0x8040201008040201ULL;
1667 writeq(val64, &bar0->rts_qos_steering);
1668 break;
1669 }
1670
1671 /* UDP Fix */
1672 val64 = 0;
1673 for (i = 0; i < 8; i++)
1674 writeq(val64, &bar0->rts_frm_len_n[i]);
1675
1676 /* Set the default rts frame length for the rings configured */
1677 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1678 for (i = 0 ; i < config->rx_ring_num ; i++)
1679 writeq(val64, &bar0->rts_frm_len_n[i]);
1680
1681 /* Set the frame length for the configured rings
1682 * desired by the user
1683 */
1684 for (i = 0; i < config->rx_ring_num; i++) {
1685 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1686 * not specified frame length steering.
1687 * If the user provides a frame length then program
1688 * the rts_frm_len register with that value, or else
1689 * leave it as it is.
1690 */
1691 if (rts_frm_len[i] != 0) {
1692 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1693 &bar0->rts_frm_len_n[i]);
1694 }
1695 }
1696
1697 /* Disable differentiated services steering logic */
1698 for (i = 0; i < 64; i++) {
1699 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1700 DBG_PRINT(ERR_DBG,
1701 "%s: rts_ds_steer failed on codepoint %d\n",
1702 dev->name, i);
1703 return -ENODEV;
1704 }
1705 }
1706
1707 /* Program statistics memory */
1708 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1709
1710 if (nic->device_type == XFRAME_II_DEVICE) {
1711 val64 = STAT_BC(0x320);
1712 writeq(val64, &bar0->stat_byte_cnt);
1713 }
1714
1715 /*
1716 * Initializing the sampling rate for the device to calculate the
1717 * bandwidth utilization.
1718 */
1719 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1720 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1721 writeq(val64, &bar0->mac_link_util);
1722
1723 /*
1724 * Initializing the Transmit and Receive Traffic Interrupt
1725 * Scheme.
1726 */
1727
1728 /* Initialize TTI */
1729 if (SUCCESS != init_tti(nic, nic->last_link_state))
1730 return -ENODEV;
1731
1732 /* RTI Initialization */
1733 if (nic->device_type == XFRAME_II_DEVICE) {
1734 /*
1735 * Programmed to generate approximately 500 interrupts
1736 * per second
1737 */
1738 int count = (nic->config.bus_speed * 125)/4;
1739 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1740 } else
1741 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
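	/*
	 * Worked example for the XFRAME_II branch above (assuming
	 * bus_speed = 266): count = (266 * 125) / 4 = 8312 after integer
	 * division, targeting roughly 500 Rx interrupts per second.
	 */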
1742 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1743 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1744 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1745 RTI_DATA1_MEM_RX_TIMER_AC_EN;
1746
1747 writeq(val64, &bar0->rti_data1_mem);
1748
1749 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1750 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1751 if (nic->config.intr_type == MSI_X)
1752 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1753 RTI_DATA2_MEM_RX_UFC_D(0x40));
1754 else
1755 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1756 RTI_DATA2_MEM_RX_UFC_D(0x80));
1757 writeq(val64, &bar0->rti_data2_mem);
1758
1759 for (i = 0; i < config->rx_ring_num; i++) {
1760 val64 = RTI_CMD_MEM_WE |
1761 RTI_CMD_MEM_STROBE_NEW_CMD |
1762 RTI_CMD_MEM_OFFSET(i);
1763 writeq(val64, &bar0->rti_command_mem);
1764
1765 /*
1766 * Once the operation completes, the Strobe bit of the
1767 * command register will be reset. We poll for this
1768 * particular condition. We wait for a maximum of 500ms
1769 * for the operation to complete, if it's not complete
1770 * by then we return error.
1771 */
1772 time = 0;
1773 while (true) {
1774 val64 = readq(&bar0->rti_command_mem);
1775 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1776 break;
1777
1778 if (time > 10) {
1779 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1780 dev->name);
1781 return -ENODEV;
1782 }
1783 time++;
1784 msleep(50);
1785 }
1786 }
1787
1788 /*
1789 * Initializing proper pause-threshold values for all
1790 * the 8 queues on the Rx side.
1791 */
1792 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1793 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1794
1795 /* Disable RMAC PAD STRIPPING */
1796 add = &bar0->mac_cfg;
1797 val64 = readq(&bar0->mac_cfg);
1798 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1799 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1800 writel((u32) (val64), add);
1801 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1802 writel((u32) (val64 >> 32), (add + 4));
1803 val64 = readq(&bar0->mac_cfg);
1804
1805 /* Enable FCS stripping by adapter */
1806 add = &bar0->mac_cfg;
1807 val64 = readq(&bar0->mac_cfg);
1808 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1809 if (nic->device_type == XFRAME_II_DEVICE)
1810 writeq(val64, &bar0->mac_cfg);
1811 else {
1812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1813 writel((u32) (val64), add);
1814 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1815 writel((u32) (val64 >> 32), (add + 4));
1816 }
1817
1818 /*
1819 * Set the time value to be inserted in the pause frame
1820 * generated by xena.
1821 */
1822 val64 = readq(&bar0->rmac_pause_cfg);
1823 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1824 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1825 writeq(val64, &bar0->rmac_pause_cfg);
1826
1827 /*
1828 * Set the Threshold Limit for Generating the pause frame
1829 * If the amount of data in any Queue exceeds ratio of
1830 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1831 * pause frame is generated
1832 */
1833 val64 = 0;
1834 for (i = 0; i < 4; i++) {
1835 val64 |= (((u64)0xFF00 |
1836 nic->mac_control.mc_pause_threshold_q0q3)
1837 << (i * 2 * 8));
1838 }
1839 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1840
1841 val64 = 0;
1842 for (i = 0; i < 4; i++) {
1843 val64 |= (((u64)0xFF00 |
1844 nic->mac_control.mc_pause_threshold_q4q7)
1845 << (i * 2 * 8));
1846 }
1847 writeq(val64, &bar0->mc_pause_thresh_q4q7);
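/*
 * Each Rx queue owns a 16-bit field in these 64-bit registers; the
 * high byte is fixed at 0xFF and the low byte is the programmed
 * threshold. For a threshold of 0xBB (an example value only), the
 * loops above build 0xFFBBFFBBFFBBFFBB by shifting the 16-bit pattern
 * to bit offsets 0, 16, 32 and 48 (i * 2 * 8).
 */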
1848
1849 /*
1850 * TxDMA will stop issuing read requests if the number of read
1851 * splits exceeds the limit set by shared_splits.
1852 */
1853 val64 = readq(&bar0->pic_control);
1854 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1855 writeq(val64, &bar0->pic_control);
1856
1857 if (nic->config.bus_speed == 266) {
1858 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1859 writeq(0x0, &bar0->read_retry_delay);
1860 writeq(0x0, &bar0->write_retry_delay);
1861 }
1862
1863 /*
1864 * Programming the Herc to split every write transaction
1865 * that does not start on an ADB to reduce disconnects.
1866 */
1867 if (nic->device_type == XFRAME_II_DEVICE) {
1868 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1869 MISC_LINK_STABILITY_PRD(3);
1870 writeq(val64, &bar0->misc_control);
1871 val64 = readq(&bar0->pic_control2);
1872 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1873 writeq(val64, &bar0->pic_control2);
1874 }
1875 if (strstr(nic->product_name, "CX4")) {
1876 val64 = TMAC_AVG_IPG(0x17);
1877 writeq(val64, &bar0->tmac_avg_ipg);
1878 }
1879
1880 return SUCCESS;
1881 }
1882 #define LINK_UP_DOWN_INTERRUPT 1
1883 #define MAC_RMAC_ERR_TIMER 2
1884
1885 static int s2io_link_fault_indication(struct s2io_nic *nic)
1886 {
1887 if (nic->device_type == XFRAME_II_DEVICE)
1888 return LINK_UP_DOWN_INTERRUPT;
1889 else
1890 return MAC_RMAC_ERR_TIMER;
1891 }
1892
1893 /**
1894 * do_s2io_write_bits - update alarm bits in alarm register
1895 * @value: alarm bits
1896 * @flag: ENABLE_INTRS or DISABLE_INTRS
1897 * @addr: address value
1898 * Description: update alarm bits in alarm register
1899 * Return Value:
1900 * NONE.
1901 */
1902 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1903 {
1904 u64 temp64;
1905
1906 temp64 = readq(addr);
1907
1908 if (flag == ENABLE_INTRS)
1909 temp64 &= ~((u64)value);
1910 else
1911 temp64 |= ((u64)value);
1912 writeq(temp64, addr);
1913 }
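/*
 * Note the inverted sense: these are mask registers, so enabling an
 * alarm clears its mask bit and disabling sets it. A hypothetical
 * caller unmasking just two PFC alarm sources would look like:
 *
 *	do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM,
 *			   ENABLE_INTRS, &bar0->pfc_err_mask);
 */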
1914
1915 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1916 {
1917 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1918 register u64 gen_int_mask = 0;
1919 u64 interruptible;
1920
1921 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1922 if (mask & TX_DMA_INTR) {
1923 gen_int_mask |= TXDMA_INT_M;
1924
1925 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1926 TXDMA_PCC_INT | TXDMA_TTI_INT |
1927 TXDMA_LSO_INT | TXDMA_TPA_INT |
1928 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1929
1930 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1931 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1932 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1933 &bar0->pfc_err_mask);
1934
1935 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1936 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1937 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1938
1939 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1940 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1941 PCC_N_SERR | PCC_6_COF_OV_ERR |
1942 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1943 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1944 PCC_TXB_ECC_SG_ERR,
1945 flag, &bar0->pcc_err_mask);
1946
1947 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1948 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1949
1950 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1951 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1952 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1953 flag, &bar0->lso_err_mask);
1954
1955 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1956 flag, &bar0->tpa_err_mask);
1957
1958 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1959 }
1960
1961 if (mask & TX_MAC_INTR) {
1962 gen_int_mask |= TXMAC_INT_M;
1963 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1964 &bar0->mac_int_mask);
1965 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1966 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1967 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1968 flag, &bar0->mac_tmac_err_mask);
1969 }
1970
1971 if (mask & TX_XGXS_INTR) {
1972 gen_int_mask |= TXXGXS_INT_M;
1973 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1974 &bar0->xgxs_int_mask);
1975 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1976 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1977 flag, &bar0->xgxs_txgxs_err_mask);
1978 }
1979
1980 if (mask & RX_DMA_INTR) {
1981 gen_int_mask |= RXDMA_INT_M;
1982 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1983 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1984 flag, &bar0->rxdma_int_mask);
1985 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1986 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1987 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1988 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1989 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1990 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1991 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1992 &bar0->prc_pcix_err_mask);
1993 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1994 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1995 &bar0->rpa_err_mask);
1996 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1997 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1998 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1999 RDA_FRM_ECC_SG_ERR |
2000 RDA_MISC_ERR|RDA_PCIX_ERR,
2001 flag, &bar0->rda_err_mask);
2002 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2003 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2004 flag, &bar0->rti_err_mask);
2005 }
2006
2007 if (mask & RX_MAC_INTR) {
2008 gen_int_mask |= RXMAC_INT_M;
2009 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2010 &bar0->mac_int_mask);
2011 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2012 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2013 RMAC_DOUBLE_ECC_ERR);
2014 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2015 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2016 do_s2io_write_bits(interruptible,
2017 flag, &bar0->mac_rmac_err_mask);
2018 }
2019
2020 if (mask & RX_XGXS_INTR) {
2021 gen_int_mask |= RXXGXS_INT_M;
2022 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2023 &bar0->xgxs_int_mask);
2024 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2025 &bar0->xgxs_rxgxs_err_mask);
2026 }
2027
2028 if (mask & MC_INTR) {
2029 gen_int_mask |= MC_INT_M;
2030 do_s2io_write_bits(MC_INT_MASK_MC_INT,
2031 flag, &bar0->mc_int_mask);
2032 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2033 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2034 &bar0->mc_err_mask);
2035 }
2036 nic->general_int_mask = gen_int_mask;
2037
2038 /* Remove this line when alarm interrupts are enabled */
2039 nic->general_int_mask = 0;
2040 }
2041
2042 /**
2043 * en_dis_able_nic_intrs - Enable or Disable the interrupts
2044 * @nic: device private variable.
2045 * @mask: A mask indicating which Intr blocks must be modified.
2046 * @flag: A flag indicating whether to enable or disable the Intrs.
2047 * Description: This function will either disable or enable the interrupts
2048 * depending on the flag argument. The mask argument can be used to
2049 * enable/disable any Intr block.
2050 * Return Value: NONE.
2051 */
2052
2053 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2054 {
2055 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2056 register u64 temp64 = 0, intr_mask = 0;
2057
2058 intr_mask = nic->general_int_mask;
2059
2060 /* Top level interrupt classification */
2061 /* PIC Interrupts */
2062 if (mask & TX_PIC_INTR) {
2063 /* Enable PIC Intrs in the general intr mask register */
2064 intr_mask |= TXPIC_INT_M;
2065 if (flag == ENABLE_INTRS) {
2066 /*
2067 * If this is a Hercules adapter, enable the GPIO
2068 * interrupt; otherwise disable all PCIX, Flash, MDIO,
2069 * IIC and GPIO interrupts for now.
2070 * TODO
2071 */
2072 if (s2io_link_fault_indication(nic) ==
2073 LINK_UP_DOWN_INTERRUPT) {
2074 do_s2io_write_bits(PIC_INT_GPIO, flag,
2075 &bar0->pic_int_mask);
2076 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2077 &bar0->gpio_int_mask);
2078 } else
2079 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2080 } else if (flag == DISABLE_INTRS) {
2081 /*
2082 * Disable PIC Intrs in the general
2083 * intr mask register
2084 */
2085 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2086 }
2087 }
2088
2089 /* Tx traffic interrupts */
2090 if (mask & TX_TRAFFIC_INTR) {
2091 intr_mask |= TXTRAFFIC_INT_M;
2092 if (flag == ENABLE_INTRS) {
2093 /*
2094 * Enable all the Tx side interrupts;
2095 * writing 0 enables all 64 TX interrupt levels.
2096 */
2097 writeq(0x0, &bar0->tx_traffic_mask);
2098 } else if (flag == DISABLE_INTRS) {
2099 /*
2100 * Disable Tx Traffic Intrs in the general intr mask
2101 * register.
2102 */
2103 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2104 }
2105 }
2106
2107 /* Rx traffic interrupts */
2108 if (mask & RX_TRAFFIC_INTR) {
2109 intr_mask |= RXTRAFFIC_INT_M;
2110 if (flag == ENABLE_INTRS) {
2111 /* writing 0 Enables all 8 RX interrupt levels */
2112 writeq(0x0, &bar0->rx_traffic_mask);
2113 } else if (flag == DISABLE_INTRS) {
2114 /*
2115 * Disable Rx Traffic Intrs in the general intr mask
2116 * register.
2117 */
2118 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2119 }
2120 }
2121
2122 temp64 = readq(&bar0->general_int_mask);
2123 if (flag == ENABLE_INTRS)
2124 temp64 &= ~((u64)intr_mask);
2125 else
2126 temp64 = DISABLE_ALL_INTRS;
2127 writeq(temp64, &bar0->general_int_mask);
2128
2129 nic->general_int_mask = readq(&bar0->general_int_mask);
2130 }
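/*
 * For reference, stop_nic() below disables the traffic and PIC
 * interrupts in one call. A sketch of the matching enable path, under
 * the same mask conventions:
 *
 *	u16 intrs = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_PIC_INTR;
 *
 *	en_dis_able_nic_intrs(nic, intrs, ENABLE_INTRS);
 *
 * On enable only the requested blocks are unmasked in
 * general_int_mask; on disable the register is written with
 * DISABLE_ALL_INTRS wholesale.
 */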
2131
2132 /**
2133 * verify_pcc_quiescent - Checks for PCC quiescent state
2134 * Return: 1 if the PCC is quiescent
2135 * 0 if the PCC is not quiescent
2136 */
2137 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2138 {
2139 int ret = 0, herc;
2140 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2141 u64 val64 = readq(&bar0->adapter_status);
2142
2143 herc = (sp->device_type == XFRAME_II_DEVICE);
2144
2145 if (flag == false) {
2146 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2148 ret = 1;
2149 } else {
2150 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2151 ret = 1;
2152 }
2153 } else {
2154 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2155 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2156 ADAPTER_STATUS_RMAC_PCC_IDLE))
2157 ret = 1;
2158 } else {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2160 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2161 ret = 1;
2162 }
2163 }
2164
2165 return ret;
2166 }
2167 /**
2168 * verify_xena_quiescence - Checks whether the H/W is ready
2169 * Description: Returns whether the H/W is ready to go or not. Depending
2170 * on whether adapter enable bit was written or not the comparison
2171 * differs and the calling function passes the input argument flag to
2172 * indicate this.
2173 * Return: 1 if Xena is quiescent
2174 * 0 if Xena is not quiescent
2175 */
2176
2177 static int verify_xena_quiescence(struct s2io_nic *sp)
2178 {
2179 int mode;
2180 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2181 u64 val64 = readq(&bar0->adapter_status);
2182 mode = s2io_verify_pci_mode(sp);
2183
2184 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2185 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2186 return 0;
2187 }
2188 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2189 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2190 return 0;
2191 }
2192 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2193 DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2194 return 0;
2195 }
2196 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2197 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2198 return 0;
2199 }
2200 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2201 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2202 return 0;
2203 }
2204 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2205 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2206 return 0;
2207 }
2208 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2209 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2210 return 0;
2211 }
2212 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2213 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2214 return 0;
2215 }
2216
2217 /*
2218 * In PCI 33 mode, the P_PLL is not used, and therefore,
2219 * the P_PLL_LOCK bit in the adapter_status register will
2220 * not be asserted.
2221 */
2222 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2223 sp->device_type == XFRAME_II_DEVICE &&
2224 mode != PCI_MODE_PCI_33) {
2225 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2226 return 0;
2227 }
2228 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2229 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2230 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2231 return 0;
2232 }
2233 return 1;
2234 }
2235
2236 /**
2237 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2238 * @sp: Pointer to device specific structure
2239 * Description :
2240 * New procedure to clear mac address reading problems on Alpha platforms
2241 *
2242 */
2243
2244 static void fix_mac_address(struct s2io_nic *sp)
2245 {
2246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2247 int i = 0;
2248
2249 while (fix_mac[i] != END_SIGN) {
2250 writeq(fix_mac[i++], &bar0->gpio_control);
2251 udelay(10);
2252 (void) readq(&bar0->gpio_control);
2253 }
2254 }
2255
2256 /**
2257 * start_nic - Turns the device on
2258 * @nic : device private variable.
2259 * Description:
2260 * This function actually turns the device on. Before this function is
2261 * called,all Registers are configured from their reset states
2262 * and shared memory is allocated but the NIC is still quiescent. On
2263 * calling this function, the device interrupts are cleared and the NIC is
2264 * literally switched on by writing into the adapter control register.
2265 * Return Value:
2266 * SUCCESS on success and -1 on failure.
2267 */
2268
2269 static int start_nic(struct s2io_nic *nic)
2270 {
2271 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2272 struct net_device *dev = nic->dev;
2273 register u64 val64 = 0;
2274 u16 subid, i;
2275 struct config_param *config = &nic->config;
2276 struct mac_info *mac_control = &nic->mac_control;
2277
2278 /* PRC Initialization and configuration */
2279 for (i = 0; i < config->rx_ring_num; i++) {
2280 struct ring_info *ring = &mac_control->rings[i];
2281
2282 writeq((u64)ring->rx_blocks[0].block_dma_addr,
2283 &bar0->prc_rxd0_n[i]);
2284
2285 val64 = readq(&bar0->prc_ctrl_n[i]);
2286 if (nic->rxd_mode == RXD_MODE_1)
2287 val64 |= PRC_CTRL_RC_ENABLED;
2288 else
2289 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2290 if (nic->device_type == XFRAME_II_DEVICE)
2291 val64 |= PRC_CTRL_GROUP_READS;
2292 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2293 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2294 writeq(val64, &bar0->prc_ctrl_n[i]);
2295 }
2296
2297 if (nic->rxd_mode == RXD_MODE_3B) {
2298 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2299 val64 = readq(&bar0->rx_pa_cfg);
2300 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2301 writeq(val64, &bar0->rx_pa_cfg);
2302 }
2303
2304 if (vlan_tag_strip == 0) {
2305 val64 = readq(&bar0->rx_pa_cfg);
2306 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2307 writeq(val64, &bar0->rx_pa_cfg);
2308 nic->vlan_strip_flag = 0;
2309 }
2310
2311 /*
2312 * Enabling MC-RLDRAM. After enabling the device, we wait
2313 * for around 100ms, which is approximately the time required
2314 * for the device to be ready for operation.
2315 */
2316 val64 = readq(&bar0->mc_rldram_mrs);
2317 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2318 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2319 val64 = readq(&bar0->mc_rldram_mrs);
2320
2321 msleep(100); /* Delay by around 100 ms. */
2322
2323 /* Enabling ECC Protection. */
2324 val64 = readq(&bar0->adapter_control);
2325 val64 &= ~ADAPTER_ECC_EN;
2326 writeq(val64, &bar0->adapter_control);
2327
2328 /*
2329 * Verify if the device is ready to be enabled, if so enable
2330 * it.
2331 */
2332 val64 = readq(&bar0->adapter_status);
2333 if (!verify_xena_quiescence(nic)) {
2334 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2335 "Adapter status reads: 0x%llx\n",
2336 dev->name, (unsigned long long)val64);
2337 return FAILURE;
2338 }
2339
2340 /*
2341 * With some switches, link might be already up at this point.
2342 * Because of this weird behavior, when we enable laser,
2343 * we may not get link. We need to handle this. We cannot
2344 * figure out which switch is misbehaving. So we are forced to
2345 * make a global change.
2346 */
2347
2348 /* Enabling Laser. */
2349 val64 = readq(&bar0->adapter_control);
2350 val64 |= ADAPTER_EOI_TX_ON;
2351 writeq(val64, &bar0->adapter_control);
2352
2353 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2354 /*
2355 * We don't see link state interrupts initially on some switches,
2356 * so directly scheduling the link state task here.
2357 */
2358 schedule_work(&nic->set_link_task);
2359 }
2360 /* SXE-002: Initialize link and activity LED */
2361 subid = nic->pdev->subsystem_device;
2362 if (((subid & 0xFF) >= 0x07) &&
2363 (nic->device_type == XFRAME_I_DEVICE)) {
2364 val64 = readq(&bar0->gpio_control);
2365 val64 |= 0x0000800000000000ULL;
2366 writeq(val64, &bar0->gpio_control);
2367 val64 = 0x0411040400000000ULL;
2368 writeq(val64, (void __iomem *)bar0 + 0x2700);
2369 }
2370
2371 return SUCCESS;
2372 }
2373 /**
2374 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2375 */
2376 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2377 struct TxD *txdlp, int get_off)
2378 {
2379 struct s2io_nic *nic = fifo_data->nic;
2380 struct sk_buff *skb;
2381 struct TxD *txds;
2382 u16 j, frg_cnt;
2383
2384 txds = txdlp;
2385 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2386 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2387 sizeof(u64), PCI_DMA_TODEVICE);
2388 txds++;
2389 }
2390
2391 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2392 if (!skb) {
2393 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2394 return NULL;
2395 }
2396 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2397 skb_headlen(skb), PCI_DMA_TODEVICE);
2398 frg_cnt = skb_shinfo(skb)->nr_frags;
2399 if (frg_cnt) {
2400 txds++;
2401 for (j = 0; j < frg_cnt; j++, txds++) {
2402 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2403 if (!txds->Buffer_Pointer)
2404 break;
2405 pci_unmap_page(nic->pdev,
2406 (dma_addr_t)txds->Buffer_Pointer,
2407 frag->size, PCI_DMA_TODEVICE);
2408 }
2409 }
2410 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2411 return skb;
2412 }
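/*
 * Layout assumed by the walk above: an optional leading descriptor
 * maps the UFO in-band buffer, the next descriptor maps the skb head
 * (its Host_Control field holds the skb pointer), and each remaining
 * descriptor maps one page fragment. The whole TxD list is zeroed
 * once the mappings are torn down so it can be reused.
 */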
2413
2414 /**
2415 * free_tx_buffers - Free all queued Tx buffers
2416 * @nic : device private variable.
2417 * Description:
2418 * Free all queued Tx buffers.
2419 * Return Value: void
2420 */
2421
2422 static void free_tx_buffers(struct s2io_nic *nic)
2423 {
2424 struct net_device *dev = nic->dev;
2425 struct sk_buff *skb;
2426 struct TxD *txdp;
2427 int i, j;
2428 int cnt = 0;
2429 struct config_param *config = &nic->config;
2430 struct mac_info *mac_control = &nic->mac_control;
2431 struct stat_block *stats = mac_control->stats_info;
2432 struct swStat *swstats = &stats->sw_stat;
2433
2434 for (i = 0; i < config->tx_fifo_num; i++) {
2435 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2436 struct fifo_info *fifo = &mac_control->fifos[i];
2437 unsigned long flags;
2438
2439 spin_lock_irqsave(&fifo->tx_lock, flags);
2440 for (j = 0; j < tx_cfg->fifo_len; j++) {
2441 txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
2442 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2443 if (skb) {
2444 swstats->mem_freed += skb->truesize;
2445 dev_kfree_skb(skb);
2446 cnt++;
2447 }
2448 }
2449 DBG_PRINT(INTR_DBG,
2450 "%s: forcibly freeing %d skbs on FIFO%d\n",
2451 dev->name, cnt, i);
2452 fifo->tx_curr_get_info.offset = 0;
2453 fifo->tx_curr_put_info.offset = 0;
2454 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2455 }
2456 }
2457
2458 /**
2459 * stop_nic - To stop the nic
2460 * @nic : device private variable.
2461 * Description:
2462 * This function does exactly the opposite of what the start_nic()
2463 * function does. This function is called to stop the device.
2464 * Return Value:
2465 * void.
2466 */
2467
2468 static void stop_nic(struct s2io_nic *nic)
2469 {
2470 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2471 register u64 val64 = 0;
2472 u16 interruptible;
2473
2474 /* Disable all interrupts */
2475 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2476 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2477 interruptible |= TX_PIC_INTR;
2478 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2479
2480 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2481 val64 = readq(&bar0->adapter_control);
2482 val64 &= ~(ADAPTER_CNTL_EN);
2483 writeq(val64, &bar0->adapter_control);
2484 }
2485
2486 /**
2487 * fill_rx_buffers - Allocates the Rx side skbs
2488 * @ring_info: per ring structure
2489 * @from_card_up: If this is true, we will map the buffer to get
2490 * the dma address for buf0 and buf1 to give it to the card.
2491 * Else we will sync the already mapped buffer to give it to the card.
2492 * Description:
2493 * The function allocates Rx side skbs and puts the physical
2494 * address of these buffers into the RxD buffer pointers, so that the NIC
2495 * can DMA the received frame into these locations.
2496 * The NIC supports 3 receive modes, viz
2497 * 1. single buffer,
2498 * 2. three buffer and
2499 * 3. five buffer modes.
2500 * Each mode defines how many fragments the received frame will be split
2501 * up into by the NIC. The frame is split into L3 header, L4 Header,
2502 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2503 * is split into 3 fragments. As of now only the single buffer
2504 * and two buffer (3B) modes are supported.
2505 * Return Value:
2506 * SUCCESS on success or an appropriate -ve value on failure.
2507 */
2508 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2509 int from_card_up)
2510 {
2511 struct sk_buff *skb;
2512 struct RxD_t *rxdp;
2513 int off, size, block_no, block_no1;
2514 u32 alloc_tab = 0;
2515 u32 alloc_cnt;
2516 u64 tmp;
2517 struct buffAdd *ba;
2518 struct RxD_t *first_rxdp = NULL;
2519 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2520 int rxd_index = 0;
2521 struct RxD1 *rxdp1;
2522 struct RxD3 *rxdp3;
2523 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2524
2525 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2526
2527 block_no1 = ring->rx_curr_get_info.block_index;
2528 while (alloc_tab < alloc_cnt) {
2529 block_no = ring->rx_curr_put_info.block_index;
2530
2531 off = ring->rx_curr_put_info.offset;
2532
2533 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2534
2535 rxd_index = off + 1;
2536 if (block_no)
2537 rxd_index += (block_no * ring->rxd_count);
2538
2539 if ((block_no == block_no1) &&
2540 (off == ring->rx_curr_get_info.offset) &&
2541 (rxdp->Host_Control)) {
2542 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2543 ring->dev->name);
2544 goto end;
2545 }
2546 if (off && (off == ring->rxd_count)) {
2547 ring->rx_curr_put_info.block_index++;
2548 if (ring->rx_curr_put_info.block_index ==
2549 ring->block_count)
2550 ring->rx_curr_put_info.block_index = 0;
2551 block_no = ring->rx_curr_put_info.block_index;
2552 off = 0;
2553 ring->rx_curr_put_info.offset = off;
2554 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2555 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2556 ring->dev->name, rxdp);
2557
2558 }
2559
2560 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2561 ((ring->rxd_mode == RXD_MODE_3B) &&
2562 (rxdp->Control_2 & s2BIT(0)))) {
2563 ring->rx_curr_put_info.offset = off;
2564 goto end;
2565 }
2566 /* calculate size of skb based on ring mode */
2567 size = ring->mtu +
2568 HEADER_ETHERNET_II_802_3_SIZE +
2569 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2570 if (ring->rxd_mode == RXD_MODE_1)
2571 size += NET_IP_ALIGN;
2572 else
2573 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2574
2575 /* allocate skb */
2576 skb = dev_alloc_skb(size);
2577 if (!skb) {
2578 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2579 ring->dev->name);
2580 if (first_rxdp) {
2581 wmb();
2582 first_rxdp->Control_1 |= RXD_OWN_XENA;
2583 }
2584 swstats->mem_alloc_fail_cnt++;
2585
2586 return -ENOMEM;
2587 }
2588 swstats->mem_allocated += skb->truesize;
2589
2590 if (ring->rxd_mode == RXD_MODE_1) {
2591 /* 1 buffer mode - normal operation mode */
2592 rxdp1 = (struct RxD1 *)rxdp;
2593 memset(rxdp, 0, sizeof(struct RxD1));
2594 skb_reserve(skb, NET_IP_ALIGN);
2595 rxdp1->Buffer0_ptr =
2596 pci_map_single(ring->pdev, skb->data,
2597 size - NET_IP_ALIGN,
2598 PCI_DMA_FROMDEVICE);
2599 if (pci_dma_mapping_error(nic->pdev,
2600 rxdp1->Buffer0_ptr))
2601 goto pci_map_failed;
2602
2603 rxdp->Control_2 =
2604 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2605 rxdp->Host_Control = (unsigned long)skb;
2606 } else if (ring->rxd_mode == RXD_MODE_3B) {
2607 /*
2608 * 2 buffer mode -
2609 * 2 buffer mode provides 128
2610 * byte aligned receive buffers.
2611 */
2612
2613 rxdp3 = (struct RxD3 *)rxdp;
2614 /* save buffer pointers to avoid frequent dma mapping */
2615 Buffer0_ptr = rxdp3->Buffer0_ptr;
2616 Buffer1_ptr = rxdp3->Buffer1_ptr;
2617 memset(rxdp, 0, sizeof(struct RxD3));
2618 /* restore the buffer pointers for dma sync*/
2619 rxdp3->Buffer0_ptr = Buffer0_ptr;
2620 rxdp3->Buffer1_ptr = Buffer1_ptr;
2621
2622 ba = &ring->ba[block_no][off];
2623 skb_reserve(skb, BUF0_LEN);
2624 tmp = (u64)(unsigned long)skb->data;
2625 tmp += ALIGN_SIZE;
2626 tmp &= ~ALIGN_SIZE;
2627 skb->data = (void *) (unsigned long)tmp;
2628 skb_reset_tail_pointer(skb);
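/*
 * The three lines above are the standard align-up idiom, assuming
 * ALIGN_SIZE is of the form 2^n - 1 (127 here, per the "128 byte
 * aligned receive buffers" comment above):
 *
 *	tmp = (tmp + 127) & ~127ULL;
 *
 * so an skb->data ending in ...1005 would move to ...1080.
 */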
2629
2630 if (from_card_up) {
2631 rxdp3->Buffer0_ptr =
2632 pci_map_single(ring->pdev, ba->ba_0,
2633 BUF0_LEN,
2634 PCI_DMA_FROMDEVICE);
2635 if (pci_dma_mapping_error(nic->pdev,
2636 rxdp3->Buffer0_ptr))
2637 goto pci_map_failed;
2638 } else
2639 pci_dma_sync_single_for_device(ring->pdev,
2640 (dma_addr_t)rxdp3->Buffer0_ptr,
2641 BUF0_LEN,
2642 PCI_DMA_FROMDEVICE);
2643
2644 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2645 if (ring->rxd_mode == RXD_MODE_3B) {
2646 /* Two buffer mode */
2647
2648 /*
2649 * Buffer2 will have L3/L4 header plus
2650 * L4 payload
2651 */
2652 rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2653 skb->data,
2654 ring->mtu + 4,
2655 PCI_DMA_FROMDEVICE);
2656
2657 if (pci_dma_mapping_error(nic->pdev,
2658 rxdp3->Buffer2_ptr))
2659 goto pci_map_failed;
2660
2661 if (from_card_up) {
2662 rxdp3->Buffer1_ptr =
2663 pci_map_single(ring->pdev,
2664 ba->ba_1,
2665 BUF1_LEN,
2666 PCI_DMA_FROMDEVICE);
2667
2668 if (pci_dma_mapping_error(nic->pdev,
2669 rxdp3->Buffer1_ptr)) {
2670 pci_unmap_single(ring->pdev,
2671 (dma_addr_t)(unsigned long)
2672 skb->data,
2673 ring->mtu + 4,
2674 PCI_DMA_FROMDEVICE);
2675 goto pci_map_failed;
2676 }
2677 }
2678 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2679 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2680 (ring->mtu + 4);
2681 }
2682 rxdp->Control_2 |= s2BIT(0);
2683 rxdp->Host_Control = (unsigned long) (skb);
2684 }
2685 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2686 rxdp->Control_1 |= RXD_OWN_XENA;
2687 off++;
2688 if (off == (ring->rxd_count + 1))
2689 off = 0;
2690 ring->rx_curr_put_info.offset = off;
2691
2692 rxdp->Control_2 |= SET_RXD_MARKER;
2693 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2694 if (first_rxdp) {
2695 wmb();
2696 first_rxdp->Control_1 |= RXD_OWN_XENA;
2697 }
2698 first_rxdp = rxdp;
2699 }
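/*
 * Ownership is handed to the NIC in batches of (1 << rxsync_frequency)
 * descriptors: every RxD except the first of a batch gets RXD_OWN_XENA
 * immediately (above), while the batch leader is remembered in
 * first_rxdp and released only at the next batch boundary, behind a
 * wmb(), so the adapter never sees a leader before its followers are
 * fully initialized.
 */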
2700 ring->rx_bufs_left += 1;
2701 alloc_tab++;
2702 }
2703
2704 end:
2705 /* Transfer ownership of first descriptor to adapter just before
2706 * exiting. Before that, use memory barrier so that ownership
2707 * and other fields are seen by adapter correctly.
2708 */
2709 if (first_rxdp) {
2710 wmb();
2711 first_rxdp->Control_1 |= RXD_OWN_XENA;
2712 }
2713
2714 return SUCCESS;
2715
2716 pci_map_failed:
2717 swstats->pci_map_fail_cnt++;
2718 swstats->mem_freed += skb->truesize;
2719 dev_kfree_skb_irq(skb);
2720 return -ENOMEM;
2721 }
2722
2723 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2724 {
2725 struct net_device *dev = sp->dev;
2726 int j;
2727 struct sk_buff *skb;
2728 struct RxD_t *rxdp;
2729 struct RxD1 *rxdp1;
2730 struct RxD3 *rxdp3;
2731 struct mac_info *mac_control = &sp->mac_control;
2732 struct stat_block *stats = mac_control->stats_info;
2733 struct swStat *swstats = &stats->sw_stat;
2734
2735 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2736 rxdp = mac_control->rings[ring_no].
2737 rx_blocks[blk].rxds[j].virt_addr;
2738 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2739 if (!skb)
2740 continue;
2741 if (sp->rxd_mode == RXD_MODE_1) {
2742 rxdp1 = (struct RxD1 *)rxdp;
2743 pci_unmap_single(sp->pdev,
2744 (dma_addr_t)rxdp1->Buffer0_ptr,
2745 dev->mtu +
2746 HEADER_ETHERNET_II_802_3_SIZE +
2747 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2748 PCI_DMA_FROMDEVICE);
2749 memset(rxdp, 0, sizeof(struct RxD1));
2750 } else if (sp->rxd_mode == RXD_MODE_3B) {
2751 rxdp3 = (struct RxD3 *)rxdp;
2752 pci_unmap_single(sp->pdev,
2753 (dma_addr_t)rxdp3->Buffer0_ptr,
2754 BUF0_LEN,
2755 PCI_DMA_FROMDEVICE);
2756 pci_unmap_single(sp->pdev,
2757 (dma_addr_t)rxdp3->Buffer1_ptr,
2758 BUF1_LEN,
2759 PCI_DMA_FROMDEVICE);
2760 pci_unmap_single(sp->pdev,
2761 (dma_addr_t)rxdp3->Buffer2_ptr,
2762 dev->mtu + 4,
2763 PCI_DMA_FROMDEVICE);
2764 memset(rxdp, 0, sizeof(struct RxD3));
2765 }
2766 swstats->mem_freed += skb->truesize;
2767 dev_kfree_skb(skb);
2768 mac_control->rings[ring_no].rx_bufs_left -= 1;
2769 }
2770 }
2771
2772 /**
2773 * free_rx_buffers - Frees all Rx buffers
2774 * @sp: device private variable.
2775 * Description:
2776 * This function will free all Rx buffers allocated by host.
2777 * Return Value:
2778 * NONE.
2779 */
2780
2781 static void free_rx_buffers(struct s2io_nic *sp)
2782 {
2783 struct net_device *dev = sp->dev;
2784 int i, blk = 0, buf_cnt = 0;
2785 struct config_param *config = &sp->config;
2786 struct mac_info *mac_control = &sp->mac_control;
2787
2788 for (i = 0; i < config->rx_ring_num; i++) {
2789 struct ring_info *ring = &mac_control->rings[i];
2790
2791 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2792 free_rxd_blk(sp, i, blk);
2793
2794 ring->rx_curr_put_info.block_index = 0;
2795 ring->rx_curr_get_info.block_index = 0;
2796 ring->rx_curr_put_info.offset = 0;
2797 ring->rx_curr_get_info.offset = 0;
2798 ring->rx_bufs_left = 0;
2799 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2800 dev->name, buf_cnt, i);
2801 }
2802 }
2803
2804 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2805 {
2806 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2807 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2808 ring->dev->name);
2809 }
2810 return 0;
2811 }
2812
2813 /**
2814 * s2io_poll - Rx interrupt handler for NAPI support
2815 * @napi : pointer to the napi structure.
2816 * @budget : The number of packets that were budgeted to be processed
2817 * during one pass through the 'Poll' function.
2818 * Description:
2819 * Comes into the picture only if NAPI support has been incorporated.
2820 * It does the same thing that rx_intr_handler does, but not in an
2821 * interrupt context; it will also process only a given number of packets.
2822 * Return value:
2823 * 0 on success and 1 if there are No Rx packets to be processed.
2824 */
2825
2826 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2827 {
2828 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2829 struct net_device *dev = ring->dev;
2830 int pkts_processed = 0;
2831 u8 __iomem *addr = NULL;
2832 u8 val8 = 0;
2833 struct s2io_nic *nic = netdev_priv(dev);
2834 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2835 int budget_org = budget;
2836
2837 if (unlikely(!is_s2io_card_up(nic)))
2838 return 0;
2839
2840 pkts_processed = rx_intr_handler(ring, budget);
2841 s2io_chk_rx_buffers(nic, ring);
2842
2843 if (pkts_processed < budget_org) {
2844 napi_complete(napi);
2845 /*Re Enable MSI-Rx Vector*/
2846 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2847 addr += 7 - ring->ring_no;
2848 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2849 writeb(val8, addr);
2850 val8 = readb(addr);
2851 }
2852 return pkts_processed;
2853 }
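/*
 * The re-enable above pokes one byte of the 64-bit xmsi_mask_reg:
 * byte (7 - ring_no) of the register image corresponds to this ring's
 * vector, and 0x3f/0xbf are the patterns that leave only that vector
 * unmasked. The byte-ordering rationale is inferred from the address
 * arithmetic; the driver does not spell it out.
 */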
2854
2855 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2856 {
2857 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2858 int pkts_processed = 0;
2859 int ring_pkts_processed, i;
2860 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2861 int budget_org = budget;
2862 struct config_param *config = &nic->config;
2863 struct mac_info *mac_control = &nic->mac_control;
2864
2865 if (unlikely(!is_s2io_card_up(nic)))
2866 return 0;
2867
2868 for (i = 0; i < config->rx_ring_num; i++) {
2869 struct ring_info *ring = &mac_control->rings[i];
2870 ring_pkts_processed = rx_intr_handler(ring, budget);
2871 s2io_chk_rx_buffers(nic, ring);
2872 pkts_processed += ring_pkts_processed;
2873 budget -= ring_pkts_processed;
2874 if (budget <= 0)
2875 break;
2876 }
2877 if (pkts_processed < budget_org) {
2878 napi_complete(napi);
2879 /* Re enable the Rx interrupts for the ring */
2880 writeq(0, &bar0->rx_traffic_mask);
2881 readl(&bar0->rx_traffic_mask);
2882 }
2883 return pkts_processed;
2884 }
2885
2886 #ifdef CONFIG_NET_POLL_CONTROLLER
2887 /**
2888 * s2io_netpoll - netpoll event handler entry point
2889 * @dev : pointer to the device structure.
2890 * Description:
2891 * This function will be called by upper layer to check for events on the
2892 * interface in situations where interrupts are disabled. It is used for
2893 * specific in-kernel networking tasks, such as remote consoles and kernel
2894 * debugging over the network (example netdump in RedHat).
2895 */
2896 static void s2io_netpoll(struct net_device *dev)
2897 {
2898 struct s2io_nic *nic = netdev_priv(dev);
2899 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2900 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2901 int i;
2902 struct config_param *config = &nic->config;
2903 struct mac_info *mac_control = &nic->mac_control;
2904
2905 if (pci_channel_offline(nic->pdev))
2906 return;
2907
2908 disable_irq(dev->irq);
2909
2910 writeq(val64, &bar0->rx_traffic_int);
2911 writeq(val64, &bar0->tx_traffic_int);
2912
2913 /* We need to free up the transmitted skbs, or else netpoll will
2914 * run out of skbs and fail, and eventually netpoll applications
2915 * such as netdump will fail.
2916 */
2917 for (i = 0; i < config->tx_fifo_num; i++)
2918 tx_intr_handler(&mac_control->fifos[i]);
2919
2920 /* check for received packet and indicate up to network */
2921 for (i = 0; i < config->rx_ring_num; i++) {
2922 struct ring_info *ring = &mac_control->rings[i];
2923
2924 rx_intr_handler(ring, 0);
2925 }
2926
2927 for (i = 0; i < config->rx_ring_num; i++) {
2928 struct ring_info *ring = &mac_control->rings[i];
2929
2930 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2931 DBG_PRINT(INFO_DBG,
2932 "%s: Out of memory in Rx Netpoll!!\n",
2933 dev->name);
2934 break;
2935 }
2936 }
2937 enable_irq(dev->irq);
2938 }
2939 #endif
2940
2941 /**
2942 * rx_intr_handler - Rx interrupt handler
2943 * @ring_info: per ring structure.
2944 * @budget: budget for napi processing.
2945 * Description:
2946 * If the interrupt is because of a received frame or if the
2947 * receive ring contains fresh, as yet un-processed frames, this
2948 * function is called. It picks up the RxD at which the last Rx
2949 * processing stopped, sends the skb to the OSM's Rx handler and then
2950 * increments the offset.
2951 * Return Value:
2952 * No. of napi packets processed.
2953 */
2954 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2955 {
2956 int get_block, put_block;
2957 struct rx_curr_get_info get_info, put_info;
2958 struct RxD_t *rxdp;
2959 struct sk_buff *skb;
2960 int pkt_cnt = 0, napi_pkts = 0;
2961 int i;
2962 struct RxD1 *rxdp1;
2963 struct RxD3 *rxdp3;
2964
2965 get_info = ring_data->rx_curr_get_info;
2966 get_block = get_info.block_index;
2967 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2968 put_block = put_info.block_index;
2969 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2970
2971 while (RXD_IS_UP2DT(rxdp)) {
2972 /*
2973 * If the get index is next to the put index then
2974 * it is a ring full condition
2975 */
2976 if ((get_block == put_block) &&
2977 (get_info.offset + 1) == put_info.offset) {
2978 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2979 ring_data->dev->name);
2980 break;
2981 }
2982 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2983 if (skb == NULL) {
2984 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2985 ring_data->dev->name);
2986 return 0;
2987 }
2988 if (ring_data->rxd_mode == RXD_MODE_1) {
2989 rxdp1 = (struct RxD1 *)rxdp;
2990 pci_unmap_single(ring_data->pdev, (dma_addr_t)
2991 rxdp1->Buffer0_ptr,
2992 ring_data->mtu +
2993 HEADER_ETHERNET_II_802_3_SIZE +
2994 HEADER_802_2_SIZE +
2995 HEADER_SNAP_SIZE,
2996 PCI_DMA_FROMDEVICE);
2997 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
2998 rxdp3 = (struct RxD3 *)rxdp;
2999 pci_dma_sync_single_for_cpu(ring_data->pdev,
3000 (dma_addr_t)rxdp3->Buffer0_ptr,
3001 BUF0_LEN,
3002 PCI_DMA_FROMDEVICE);
3003 pci_unmap_single(ring_data->pdev,
3004 (dma_addr_t)rxdp3->Buffer2_ptr,
3005 ring_data->mtu + 4,
3006 PCI_DMA_FROMDEVICE);
3007 }
3008 prefetch(skb->data);
3009 rx_osm_handler(ring_data, rxdp);
3010 get_info.offset++;
3011 ring_data->rx_curr_get_info.offset = get_info.offset;
3012 rxdp = ring_data->rx_blocks[get_block].
3013 rxds[get_info.offset].virt_addr;
3014 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3015 get_info.offset = 0;
3016 ring_data->rx_curr_get_info.offset = get_info.offset;
3017 get_block++;
3018 if (get_block == ring_data->block_count)
3019 get_block = 0;
3020 ring_data->rx_curr_get_info.block_index = get_block;
3021 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3022 }
3023
3024 if (ring_data->nic->config.napi) {
3025 budget--;
3026 napi_pkts++;
3027 if (!budget)
3028 break;
3029 }
3030 pkt_cnt++;
3031 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3032 break;
3033 }
3034 if (ring_data->lro) {
3035 /* Clear all LRO sessions before exiting */
3036 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
3037 struct lro *lro = &ring_data->lro0_n[i];
3038 if (lro->in_use) {
3039 update_L3L4_header(ring_data->nic, lro);
3040 queue_rx_frame(lro->parent, lro->vlan_tag);
3041 clear_lro_session(lro);
3042 }
3043 }
3044 }
3045 return napi_pkts;
3046 }
3047
3048 /**
3049 * tx_intr_handler - Transmit interrupt handler
3050 * @fifo_data : FIFO data structure of the FIFO being serviced.
3051 * Description:
3052 * If an interrupt was raised to indicate DMA complete of the
3053 * Tx packet, this function is called. It identifies the last TxD
3054 * whose buffer was freed and frees all skbs whose data have already
3055 * been DMA'ed into the NIC's internal memory.
3056 * Return Value:
3057 * NONE
3058 */
3059
3060 static void tx_intr_handler(struct fifo_info *fifo_data)
3061 {
3062 struct s2io_nic *nic = fifo_data->nic;
3063 struct tx_curr_get_info get_info, put_info;
3064 struct sk_buff *skb = NULL;
3065 struct TxD *txdlp;
3066 int pkt_cnt = 0;
3067 unsigned long flags = 0;
3068 u8 err_mask;
3069 struct stat_block *stats = nic->mac_control.stats_info;
3070 struct swStat *swstats = &stats->sw_stat;
3071
3072 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3073 return;
3074
3075 get_info = fifo_data->tx_curr_get_info;
3076 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3077 txdlp = (struct TxD *)
3078 fifo_data->list_info[get_info.offset].list_virt_addr;
3079 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3080 (get_info.offset != put_info.offset) &&
3081 (txdlp->Host_Control)) {
3082 /* Check for TxD errors */
3083 if (txdlp->Control_1 & TXD_T_CODE) {
3084 unsigned long long err;
3085 err = txdlp->Control_1 & TXD_T_CODE;
3086 if (err & 0x1) {
3087 swstats->parity_err_cnt++;
3088 }
3089
3090 /* update t_code statistics */
3091 err_mask = err >> 48;
3092 switch (err_mask) {
3093 case 2:
3094 swstats->tx_buf_abort_cnt++;
3095 break;
3096
3097 case 3:
3098 swstats->tx_desc_abort_cnt++;
3099 break;
3100
3101 case 7:
3102 swstats->tx_parity_err_cnt++;
3103 break;
3104
3105 case 10:
3106 swstats->tx_link_loss_cnt++;
3107 break;
3108
3109 case 15:
3110 swstats->tx_list_proc_err_cnt++;
3111 break;
3112 }
3113 }
3114
3115 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3116 if (skb == NULL) {
3117 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3118 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3119 __func__);
3120 return;
3121 }
3122 pkt_cnt++;
3123
3124 /* Updating the statistics block */
3125 swstats->mem_freed += skb->truesize;
3126 dev_kfree_skb_irq(skb);
3127
3128 get_info.offset++;
3129 if (get_info.offset == get_info.fifo_len + 1)
3130 get_info.offset = 0;
3131 txdlp = (struct TxD *)
3132 fifo_data->list_info[get_info.offset].list_virt_addr;
3133 fifo_data->tx_curr_get_info.offset = get_info.offset;
3134 }
3135
3136 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3137
3138 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3139 }
3140
3141 /**
3142 * s2io_mdio_write - Function to write into the MDIO registers
3143 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3144 * @addr : address value
3145 * @value : data value
3146 * @dev : pointer to net_device structure
3147 * Description:
3148 * This function is used to write values to the MDIO registers.
3149 * Return Value: NONE
3150 */
3151 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3152 struct net_device *dev)
3153 {
3154 u64 val64;
3155 struct s2io_nic *sp = netdev_priv(dev);
3156 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3157
3158 /* address transaction */
3159 val64 = MDIO_MMD_INDX_ADDR(addr) |
3160 MDIO_MMD_DEV_ADDR(mmd_type) |
3161 MDIO_MMS_PRT_ADDR(0x0);
3162 writeq(val64, &bar0->mdio_control);
3163 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3164 writeq(val64, &bar0->mdio_control);
3165 udelay(100);
3166
3167 /* Data transaction */
3168 val64 = MDIO_MMD_INDX_ADDR(addr) |
3169 MDIO_MMD_DEV_ADDR(mmd_type) |
3170 MDIO_MMS_PRT_ADDR(0x0) |
3171 MDIO_MDIO_DATA(value) |
3172 MDIO_OP(MDIO_OP_WRITE_TRANS);
3173 writeq(val64, &bar0->mdio_control);
3174 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3175 writeq(val64, &bar0->mdio_control);
3176 udelay(100);
3177
3178 val64 = MDIO_MMD_INDX_ADDR(addr) |
3179 MDIO_MMD_DEV_ADDR(mmd_type) |
3180 MDIO_MMS_PRT_ADDR(0x0) |
3181 MDIO_OP(MDIO_OP_READ_TRANS);
3182 writeq(val64, &bar0->mdio_control);
3183 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3184 writeq(val64, &bar0->mdio_control);
3185 udelay(100);
3186 }
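/*
 * A typical use, mirroring s2io_updt_xpak_counter() further below:
 * kick the XPAK DOM register in the PMA/PMD MMD, then read it back
 * with the companion routine:
 *
 *	s2io_mdio_write(MDIO_MMD_PMAPMD, 0xA100, 0, dev);
 *	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, 0xA100, dev);
 *
 * Each helper issues an address transaction and then the data
 * transaction(s) on the MDIO bus, with ~100us of settle time
 * (udelay) after each step.
 */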
3187
3188 /**
3189 * s2io_mdio_read - Function to read from the MDIO registers
3190 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3191 * @addr : address value
3192 * @dev : pointer to net_device structure
3193 * Description:
3194 * This function is used to read values from the MDIO registers.
3195 * Return Value: the 16-bit value read from the addressed register.
3196 */
3197 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3198 {
3199 u64 val64 = 0x0;
3200 u64 rval64 = 0x0;
3201 struct s2io_nic *sp = netdev_priv(dev);
3202 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3203
3204 /* address transaction */
3205 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3206 | MDIO_MMD_DEV_ADDR(mmd_type)
3207 | MDIO_MMS_PRT_ADDR(0x0));
3208 writeq(val64, &bar0->mdio_control);
3209 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3210 writeq(val64, &bar0->mdio_control);
3211 udelay(100);
3212
3213 /* Data transaction */
3214 val64 = MDIO_MMD_INDX_ADDR(addr) |
3215 MDIO_MMD_DEV_ADDR(mmd_type) |
3216 MDIO_MMS_PRT_ADDR(0x0) |
3217 MDIO_OP(MDIO_OP_READ_TRANS);
3218 writeq(val64, &bar0->mdio_control);
3219 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3220 writeq(val64, &bar0->mdio_control);
3221 udelay(100);
3222
3223 /* Read the value from regs */
3224 rval64 = readq(&bar0->mdio_control);
3225 rval64 = rval64 & 0xFFFF0000;
3226 rval64 = rval64 >> 16;
3227 return rval64;
3228 }
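/*
 * The device returns the read data in bits 31:16 of mdio_control,
 * hence the mask-and-shift above; the function effectively yields a
 * 16-bit register value widened to u64.
 */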
3229
3230 /**
3231 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3232 * @counter : counter value to be updated
3233 * @flag : flag to indicate the status
3234 * @type : counter type
3235 * Description:
3236 * This function checks the status of the xpak counters value.
3237 * Return Value: NONE
3238 */
3239
3240 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3241 u16 flag, u16 type)
3242 {
3243 u64 mask = 0x3;
3244 u64 val64;
3245 int i;
3246 for (i = 0; i < index; i++)
3247 mask = mask << 0x2;
3248
3249 if (flag > 0) {
3250 *counter = *counter + 1;
3251 val64 = *regs_stat & mask;
3252 val64 = val64 >> (index * 0x2);
3253 val64 = val64 + 1;
3254 if (val64 == 3) {
3255 switch (type) {
3256 case 1:
3257 DBG_PRINT(ERR_DBG,
3258 "Take Xframe NIC out of service.\n");
3259 DBG_PRINT(ERR_DBG,
3260 "Excessive temperatures may result in premature transceiver failure.\n");
3261 break;
3262 case 2:
3263 DBG_PRINT(ERR_DBG,
3264 "Take Xframe NIC out of service.\n");
3265 DBG_PRINT(ERR_DBG,
3266 "Excessive bias currents may indicate imminent laser diode failure.\n");
3267 break;
3268 case 3:
3269 DBG_PRINT(ERR_DBG,
3270 "Take Xframe NIC out of service.\n");
3271 DBG_PRINT(ERR_DBG,
3272 "Excessive laser output power may saturate far-end receiver.\n");
3273 break;
3274 default:
3275 DBG_PRINT(ERR_DBG,
3276 "Incorrect XPAK Alarm type\n");
3277 }
3278 val64 = 0x0;
3279 }
3280 val64 = val64 << (index * 0x2);
3281 *regs_stat = (*regs_stat & (~mask)) | (val64);
3282
3283 } else {
3284 *regs_stat = *regs_stat & (~mask);
3285 }
3286 }
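/*
 * regs_stat packs one 2-bit saturating counter per alarm source at
 * bit offset index * 2. For index 2, the update above is equivalent
 * to this sketch (print-outs omitted):
 *
 *	u64 mask = 0x3ULL << (2 * 2);
 *	u64 val = (*regs_stat & mask) >> (2 * 2);
 *
 *	val = (val + 1 == 3) ? 0 : val + 1;	 <- warn on the 3rd hit
 *	*regs_stat = (*regs_stat & ~mask) | (val << (2 * 2));
 *
 * Three consecutive alarm readings therefore trigger the "take the
 * NIC out of service" messages before the field wraps back to zero.
 */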
3287
3288 /**
3289 * s2io_updt_xpak_counter - Function to update the xpak counters
3290 * @dev : pointer to net_device struct
3291 * Description:
3292 * This function is used to update the status of the xpak counters.
3293 * Return Value: NONE
3294 */
3295 static void s2io_updt_xpak_counter(struct net_device *dev)
3296 {
3297 u16 flag = 0x0;
3298 u16 type = 0x0;
3299 u16 val16 = 0x0;
3300 u64 val64 = 0x0;
3301 u64 addr = 0x0;
3302
3303 struct s2io_nic *sp = netdev_priv(dev);
3304 struct stat_block *stats = sp->mac_control.stats_info;
3305 struct xpakStat *xstats = &stats->xpak_stat;
3306
3307 /* Check the communication with the MDIO slave */
3308 addr = MDIO_CTRL1;
3309 val64 = 0x0;
3310 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3311 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3312 DBG_PRINT(ERR_DBG,
3313 "ERR: MDIO slave access failed - Returned %llx\n",
3314 (unsigned long long)val64);
3315 return;
3316 }
3317
3318 /* Check for the expected value of control reg 1 */
3319 if (val64 != MDIO_CTRL1_SPEED10G) {
3320 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3321 "Returned: %llx- Expected: 0x%x\n",
3322 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3323 return;
3324 }
3325
3326 /* Loading the DOM register to MDIO register */
3327 addr = 0xA100;
3328 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3329 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3330
3331 /* Reading the Alarm flags */
3332 addr = 0xA070;
3333 val64 = 0x0;
3334 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3335
3336 flag = CHECKBIT(val64, 0x7);
3337 type = 1;
3338 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3339 &xstats->xpak_regs_stat,
3340 0x0, flag, type);
3341
3342 if (CHECKBIT(val64, 0x6))
3343 xstats->alarm_transceiver_temp_low++;
3344
3345 flag = CHECKBIT(val64, 0x3);
3346 type = 2;
3347 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3348 &xstats->xpak_regs_stat,
3349 0x2, flag, type);
3350
3351 if (CHECKBIT(val64, 0x2))
3352 xstats->alarm_laser_bias_current_low++;
3353
3354 flag = CHECKBIT(val64, 0x1);
3355 type = 3;
3356 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3357 &xstats->xpak_regs_stat,
3358 0x4, flag, type);
3359
3360 if (CHECKBIT(val64, 0x0))
3361 xstats->alarm_laser_output_power_low++;
3362
3363 /* Reading the Warning flags */
3364 addr = 0xA074;
3365 val64 = 0x0;
3366 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3367
3368 if (CHECKBIT(val64, 0x7))
3369 xstats->warn_transceiver_temp_high++;
3370
3371 if (CHECKBIT(val64, 0x6))
3372 xstats->warn_transceiver_temp_low++;
3373
3374 if (CHECKBIT(val64, 0x3))
3375 xstats->warn_laser_bias_current_high++;
3376
3377 if (CHECKBIT(val64, 0x2))
3378 xstats->warn_laser_bias_current_low++;
3379
3380 if (CHECKBIT(val64, 0x1))
3381 xstats->warn_laser_output_power_high++;
3382
3383 if (CHECKBIT(val64, 0x0))
3384 xstats->warn_laser_output_power_low++;
3385 }
3386
3387 /**
3388 * wait_for_cmd_complete - waits for a command to complete.
3389 * @addr : address of the register to poll.
3390 * @busy_bit : busy bit mask; @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET.
3391 * Description: Function that waits for a command to Write into RMAC
3392 * ADDR DATA registers to be completed and returns either success or
3393 * error depending on whether the command was complete or not.
3394 * Return value:
3395 * SUCCESS on success and FAILURE on failure.
3396 */
3397
3398 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3399 int bit_state)
3400 {
3401 int ret = FAILURE, cnt = 0, delay = 1;
3402 u64 val64;
3403
3404 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3405 return FAILURE;
3406
3407 do {
3408 val64 = readq(addr);
3409 if (bit_state == S2IO_BIT_RESET) {
3410 if (!(val64 & busy_bit)) {
3411 ret = SUCCESS;
3412 break;
3413 }
3414 } else {
3415 if (val64 & busy_bit) {
3416 ret = SUCCESS;
3417 break;
3418 }
3419 }
3420
3421 if (in_interrupt())
3422 mdelay(delay);
3423 else
3424 msleep(delay);
3425
3426 if (++cnt >= 10)
3427 delay = 50;
3428 } while (cnt < 20);
3429 return ret;
3430 }
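/*
 * The wait is two-phase: the first ten polls sleep 1ms each, after
 * which the delay widens to 50ms for up to ten more polls, bounding
 * the total wait at roughly 10ms + 500ms. mdelay() busy-waits when we
 * are in interrupt context, where sleeping is not allowed.
 */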
3431 /*
3432 * check_pci_device_id - Checks if the device id is supported
3433 * @id : device id
3434 * Description: Function to check if the pci device id is supported by driver.
3435 * Return value: Actual device id if supported else PCI_ANY_ID
3436 */
3437 static u16 check_pci_device_id(u16 id)
3438 {
3439 switch (id) {
3440 case PCI_DEVICE_ID_HERC_WIN:
3441 case PCI_DEVICE_ID_HERC_UNI:
3442 return XFRAME_II_DEVICE;
3443 case PCI_DEVICE_ID_S2IO_UNI:
3444 case PCI_DEVICE_ID_S2IO_WIN:
3445 return XFRAME_I_DEVICE;
3446 default:
3447 return PCI_ANY_ID;
3448 }
3449 }
3450
3451 /**
3452 * s2io_reset - Resets the card.
3453 * @sp : private member of the device structure.
3454 * Description: Function to Reset the card. This function then also
3455 * restores the previously saved PCI configuration space registers as
3456 * the card reset also resets the configuration space.
3457 * Return value:
3458 * void.
3459 */
3460
3461 static void s2io_reset(struct s2io_nic *sp)
3462 {
3463 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3464 u64 val64;
3465 u16 subid, pci_cmd;
3466 int i;
3467 u16 val16;
3468 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3469 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3470 struct stat_block *stats;
3471 struct swStat *swstats;
3472
3473 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3474 __func__, pci_name(sp->pdev));
3475
3476 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3477 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3478
3479 val64 = SW_RESET_ALL;
3480 writeq(val64, &bar0->sw_reset);
3481 if (strstr(sp->product_name, "CX4"))
3482 msleep(750);
3483 msleep(250);
3484 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3485
3486 /* Restore the PCI state saved during initialization. */
3487 pci_restore_state(sp->pdev);
3488 pci_save_state(sp->pdev);
3489 pci_read_config_word(sp->pdev, 0x2, &val16);
3490 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3491 break;
3492 msleep(200);
3493 }
3494
3495 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3496 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3497
3498 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3499
3500 s2io_init_pci(sp);
3501
3502 /* Set swapper to enable I/O register access */
3503 s2io_set_swapper(sp);
3504
3505 /* restore mac_addr entries */
3506 do_s2io_restore_unicast_mc(sp);
3507
3508 /* Restore the MSIX table entries from local variables */
3509 restore_xmsi_data(sp);
3510
3511 /* Clear certain PCI/PCI-X fields after reset */
3512 if (sp->device_type == XFRAME_II_DEVICE) {
3513 /* Clear "detected parity error" bit */
3514 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3515
3516 /* Clearing PCIX Ecc status register */
3517 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3518
3519 /* Clearing PCI_STATUS error reflected here */
3520 writeq(s2BIT(62), &bar0->txpic_int_reg);
3521 }
3522
3523 /* Reset device statistics maintained by OS */
3524 memset(&sp->stats, 0, sizeof(struct net_device_stats));
3525
3526 stats = sp->mac_control.stats_info;
3527 swstats = &stats->sw_stat;
3528
3529 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3530 up_cnt = swstats->link_up_cnt;
3531 down_cnt = swstats->link_down_cnt;
3532 up_time = swstats->link_up_time;
3533 down_time = swstats->link_down_time;
3534 reset_cnt = swstats->soft_reset_cnt;
3535 mem_alloc_cnt = swstats->mem_allocated;
3536 mem_free_cnt = swstats->mem_freed;
3537 watchdog_cnt = swstats->watchdog_timer_cnt;
3538
3539 memset(stats, 0, sizeof(struct stat_block));
3540
3541 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3542 swstats->link_up_cnt = up_cnt;
3543 swstats->link_down_cnt = down_cnt;
3544 swstats->link_up_time = up_time;
3545 swstats->link_down_time = down_time;
3546 swstats->soft_reset_cnt = reset_cnt;
3547 swstats->mem_allocated = mem_alloc_cnt;
3548 swstats->mem_freed = mem_free_cnt;
3549 swstats->watchdog_timer_cnt = watchdog_cnt;
3550
3551 /* SXE-002: Configure link and activity LED to turn it off */
3552 subid = sp->pdev->subsystem_device;
3553 if (((subid & 0xFF) >= 0x07) &&
3554 (sp->device_type == XFRAME_I_DEVICE)) {
3555 val64 = readq(&bar0->gpio_control);
3556 val64 |= 0x0000800000000000ULL;
3557 writeq(val64, &bar0->gpio_control);
3558 val64 = 0x0411040400000000ULL;
3559 writeq(val64, (void __iomem *)bar0 + 0x2700);
3560 }
3561
3562 /*
3563 * Clear spurious ECC interrupts that would have occurred on
3564 * XFRAME II cards after reset.
3565 */
3566 if (sp->device_type == XFRAME_II_DEVICE) {
3567 val64 = readq(&bar0->pcc_err_reg);
3568 writeq(val64, &bar0->pcc_err_reg);
3569 }
3570
3571 sp->device_enabled_once = false;
3572 }
3573
3574 /**
3575 * s2io_set_swapper - to set the swapper control on the card
3576 * @sp : private member of the device structure,
3577 * pointer to the s2io_nic structure.
3578 * Description: Function to set the swapper control on the card
3579 * correctly depending on the 'endianness' of the system.
3580 * Return value:
3581 * SUCCESS on success and FAILURE on failure.
3582 */
3583
3584 static int s2io_set_swapper(struct s2io_nic *sp)
3585 {
3586 struct net_device *dev = sp->dev;
3587 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3588 u64 val64, valt, valr;
3589
3590 /*
3591 * Set proper endian settings and verify the same by reading
3592 * the PIF Feed-back register.
3593 */
3594
3595 val64 = readq(&bar0->pif_rd_swapper_fb);
3596 if (val64 != 0x0123456789ABCDEFULL) {
3597 int i = 0;
3598 static const u64 value[] = {
3599 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3600 0x8100008181000081ULL, /* FE=1, SE=0 */
3601 0x4200004242000042ULL, /* FE=0, SE=1 */
3602 0 /* FE=0, SE=0 */
3603 };
3604
3605 while (i < 4) {
3606 writeq(value[i], &bar0->swapper_ctrl);
3607 val64 = readq(&bar0->pif_rd_swapper_fb);
3608 if (val64 == 0x0123456789ABCDEFULL)
3609 break;
3610 i++;
3611 }
3612 if (i == 4) {
3613 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3614 "feedback read %llx\n",
3615 dev->name, (unsigned long long)val64);
3616 return FAILURE;
3617 }
3618 valr = value[i];
3619 } else {
3620 valr = readq(&bar0->swapper_ctrl);
3621 }
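/*
 * The probe above walks the candidate swapper settings until the
 * read-only pif_rd_swapper_fb register returns the expected
 * 0x0123456789ABCDEF pattern, i.e. until reads are byte-swapped
 * correctly for this host. The same trial is repeated below for the
 * write path using xmsi_address, which is read-write and can echo a
 * test pattern back.
 */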
3622
3623 valt = 0x0123456789ABCDEFULL;
3624 writeq(valt, &bar0->xmsi_address);
3625 val64 = readq(&bar0->xmsi_address);
3626
3627 if (val64 != valt) {
3628 int i = 0;
3629 static const u64 value[] = {
3630 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3631 0x0081810000818100ULL, /* FE=1, SE=0 */
3632 0x0042420000424200ULL, /* FE=0, SE=1 */
3633 0 /* FE=0, SE=0 */
3634 };
3635
3636 while (i < 4) {
3637 writeq((value[i] | valr), &bar0->swapper_ctrl);
3638 writeq(valt, &bar0->xmsi_address);
3639 val64 = readq(&bar0->xmsi_address);
3640 if (val64 == valt)
3641 break;
3642 i++;
3643 }
3644 if (i == 4) {
3645 unsigned long long x = val64;
3646 DBG_PRINT(ERR_DBG,
3647 "Write failed, Xmsi_addr reads:0x%llx\n", x);
3648 return FAILURE;
3649 }
3650 }
3651 val64 = readq(&bar0->swapper_ctrl);
3652 val64 &= 0xFFFF000000000000ULL;
3653
3654 #ifdef __BIG_ENDIAN
3655 /*
3656 * The device is set to big endian format by default, so a
3657 * big endian driver need not set anything.
3658 */
3659 val64 |= (SWAPPER_CTRL_TXP_FE |
3660 SWAPPER_CTRL_TXP_SE |
3661 SWAPPER_CTRL_TXD_R_FE |
3662 SWAPPER_CTRL_TXD_W_FE |
3663 SWAPPER_CTRL_TXF_R_FE |
3664 SWAPPER_CTRL_RXD_R_FE |
3665 SWAPPER_CTRL_RXD_W_FE |
3666 SWAPPER_CTRL_RXF_W_FE |
3667 SWAPPER_CTRL_XMSI_FE |
3668 SWAPPER_CTRL_STATS_FE |
3669 SWAPPER_CTRL_STATS_SE);
3670 if (sp->config.intr_type == INTA)
3671 val64 |= SWAPPER_CTRL_XMSI_SE;
3672 writeq(val64, &bar0->swapper_ctrl);
3673 #else
3674 /*
3675 * Initially we enable all bits to make them accessible to the
3676 * driver, then we selectively enable only those bits that
3677 * we want to set.
3678 */
3679 val64 |= (SWAPPER_CTRL_TXP_FE |
3680 SWAPPER_CTRL_TXP_SE |
3681 SWAPPER_CTRL_TXD_R_FE |
3682 SWAPPER_CTRL_TXD_R_SE |
3683 SWAPPER_CTRL_TXD_W_FE |
3684 SWAPPER_CTRL_TXD_W_SE |
3685 SWAPPER_CTRL_TXF_R_FE |
3686 SWAPPER_CTRL_RXD_R_FE |
3687 SWAPPER_CTRL_RXD_R_SE |
3688 SWAPPER_CTRL_RXD_W_FE |
3689 SWAPPER_CTRL_RXD_W_SE |
3690 SWAPPER_CTRL_RXF_W_FE |
3691 SWAPPER_CTRL_XMSI_FE |
3692 SWAPPER_CTRL_STATS_FE |
3693 SWAPPER_CTRL_STATS_SE);
3694 if (sp->config.intr_type == INTA)
3695 val64 |= SWAPPER_CTRL_XMSI_SE;
3696 writeq(val64, &bar0->swapper_ctrl);
3697 #endif
3698 val64 = readq(&bar0->swapper_ctrl);
3699
3700 /*
3701 * Verifying if endian settings are accurate by reading a
3702 * feedback register.
3703 */
3704 val64 = readq(&bar0->pif_rd_swapper_fb);
3705 if (val64 != 0x0123456789ABCDEFULL) {
3706 /* Endian settings are incorrect, calls for another look. */
3707 DBG_PRINT(ERR_DBG,
3708 "%s: Endian settings are wrong, feedback read %llx\n",
3709 dev->name, (unsigned long long)val64);
3710 return FAILURE;
3711 }
3712
3713 return SUCCESS;
3714 }
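/*
 * A minimal sketch (not part of the driver) of the probing pattern used
 * by s2io_set_swapper() above: write a candidate swapper value, then
 * read back a register whose expected contents are known. The names
 * below are the driver's own; the loop shape is the illustration:
 *
 *	for (i = 0; i < ARRAY_SIZE(value); i++) {
 *		writeq(value[i], &bar0->swapper_ctrl);
 *		if (readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL)
 *			break;		// byte order is now correct
 *	}
 *	if (i == ARRAY_SIZE(value))
 *		return FAILURE;		// no FE/SE combination matched
 */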
3715
3716 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3717 {
3718 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3719 u64 val64;
3720 int ret = 0, cnt = 0;
3721
3722 do {
3723 val64 = readq(&bar0->xmsi_access);
3724 if (!(val64 & s2BIT(15)))
3725 break;
3726 mdelay(1);
3727 cnt++;
3728 } while (cnt < 5);
3729 if (cnt == 5) {
3730 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3731 ret = 1;
3732 }
3733
3734 return ret;
3735 }
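/*
 * Note: s2BIT(15) here is the same command-strobe bit that
 * restore_xmsi_data()/store_xmsi_data() set in xmsi_access below; this
 * helper simply polls for the hardware to clear it, giving up after
 * roughly 5 ms (five 1 ms delays).
 */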
3736
3737 static void restore_xmsi_data(struct s2io_nic *nic)
3738 {
3739 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3740 u64 val64;
3741 int i, msix_index;
3742
3743 if (nic->device_type == XFRAME_I_DEVICE)
3744 return;
3745
3746 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3747 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3748 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3749 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3750 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3751 writeq(val64, &bar0->xmsi_access);
3752 if (wait_for_msix_trans(nic, msix_index)) {
3753 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3754 __func__, msix_index);
3755 continue;
3756 }
3757 }
3758 }
3759
3760 static void store_xmsi_data(struct s2io_nic *nic)
3761 {
3762 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3763 u64 val64, addr, data;
3764 int i, msix_index;
3765
3766 if (nic->device_type == XFRAME_I_DEVICE)
3767 return;
3768
3769 /* Store and display */
3770 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3771 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3772 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3773 writeq(val64, &bar0->xmsi_access);
3774 if (wait_for_msix_trans(nic, msix_index)) {
3775 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3776 __func__, msix_index);
3777 continue;
3778 }
3779 addr = readq(&bar0->xmsi_address);
3780 data = readq(&bar0->xmsi_data);
3781 if (addr && data) {
3782 nic->msix_info[i].addr = addr;
3783 nic->msix_info[i].data = data;
3784 }
3785 }
3786 }
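/*
 * Note on the msix_index mapping used in both loops above: vector 0 is
 * the alarm vector and ring vectors are spaced eight apart, so
 *	i = 0 -> msix_index 0
 *	i = 1 -> msix_index 1
 *	i = 2 -> msix_index 9
 *	i = 3 -> msix_index 17, ...
 * which matches the ((i - 1) * 8) + 1 entry numbers programmed in
 * s2io_enable_msi_x() below.
 */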
3787
3788 static int s2io_enable_msi_x(struct s2io_nic *nic)
3789 {
3790 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3791 u64 rx_mat;
3792 u16 msi_control; /* Temp variable */
3793 int ret, i, j, msix_indx = 1;
3794 int size;
3795 struct stat_block *stats = nic->mac_control.stats_info;
3796 struct swStat *swstats = &stats->sw_stat;
3797
3798 size = nic->num_entries * sizeof(struct msix_entry);
3799 nic->entries = kzalloc(size, GFP_KERNEL);
3800 if (!nic->entries) {
3801 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3802 __func__);
3803 swstats->mem_alloc_fail_cnt++;
3804 return -ENOMEM;
3805 }
3806 swstats->mem_allocated += size;
3807
3808 size = nic->num_entries * sizeof(struct s2io_msix_entry);
3809 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3810 if (!nic->s2io_entries) {
3811 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3812 __func__);
3813 swstats->mem_alloc_fail_cnt++;
3814 kfree(nic->entries);
3815 swstats->mem_freed
3816 += (nic->num_entries * sizeof(struct msix_entry));
3817 return -ENOMEM;
3818 }
3819 swstats->mem_allocated += size;
3820
3821 nic->entries[0].entry = 0;
3822 nic->s2io_entries[0].entry = 0;
3823 nic->s2io_entries[0].in_use = MSIX_FLG;
3824 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3825 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3826
3827 for (i = 1; i < nic->num_entries; i++) {
3828 nic->entries[i].entry = ((i - 1) * 8) + 1;
3829 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3830 nic->s2io_entries[i].arg = NULL;
3831 nic->s2io_entries[i].in_use = 0;
3832 }
3833
3834 rx_mat = readq(&bar0->rx_mat);
3835 for (j = 0; j < nic->config.rx_ring_num; j++) {
3836 rx_mat |= RX_MAT_SET(j, msix_indx);
3837 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3838 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3839 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3840 msix_indx += 8;
3841 }
3842 writeq(rx_mat, &bar0->rx_mat);
3843 readq(&bar0->rx_mat);
3844
3845 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3846 /* We fail init on error or if we get fewer vectors than the min required */
3847 if (ret) {
3848 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3849 kfree(nic->entries);
3850 swstats->mem_freed += nic->num_entries *
3851 sizeof(struct msix_entry);
3852 kfree(nic->s2io_entries);
3853 swstats->mem_freed += nic->num_entries *
3854 sizeof(struct s2io_msix_entry);
3855 nic->entries = NULL;
3856 nic->s2io_entries = NULL;
3857 return -ENOMEM;
3858 }
3859
3860 /*
3861 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3862 * in the herc NIC. (Temp change, needs to be removed later)
3863 */
3864 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3865 msi_control |= 0x1; /* Enable MSI */
3866 pci_write_config_word(nic->pdev, 0x42, msi_control);
3867
3868 return 0;
3869 }
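/*
 * Note: pci_enable_msix() of this era returns 0 on success, a negative
 * errno on failure, and a positive count when fewer vectors are
 * available than requested; the driver above treats anything non-zero
 * as failure. A hedged sketch of the retry pattern other drivers used
 * instead (illustration only):
 *
 *	ret = pci_enable_msix(pdev, entries, nvec);
 *	while (ret > 0) {		// only 'ret' vectors available
 *		nvec = ret;
 *		ret = pci_enable_msix(pdev, entries, nvec);
 *	}
 */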
3870
3871 /* Handle software interrupt used during MSI(X) test */
3872 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3873 {
3874 struct s2io_nic *sp = dev_id;
3875
3876 sp->msi_detected = 1;
3877 wake_up(&sp->msi_wait);
3878
3879 return IRQ_HANDLED;
3880 }
3881
3882 /* Test interrupt path by forcing a software IRQ */
3883 static int s2io_test_msi(struct s2io_nic *sp)
3884 {
3885 struct pci_dev *pdev = sp->pdev;
3886 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3887 int err;
3888 u64 val64, saved64;
3889
3890 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3891 sp->name, sp);
3892 if (err) {
3893 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3894 sp->dev->name, pci_name(pdev), pdev->irq);
3895 return err;
3896 }
3897
3898 init_waitqueue_head(&sp->msi_wait);
3899 sp->msi_detected = 0;
3900
3901 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3902 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3903 val64 |= SCHED_INT_CTRL_TIMER_EN;
3904 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3905 writeq(val64, &bar0->scheduled_int_ctrl);
3906
3907 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3908
3909 if (!sp->msi_detected) {
3910 /* MSI(X) test failed, go back to INTx mode */
3911 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3912 "using MSI(X) during test\n",
3913 sp->dev->name, pci_name(pdev));
3914
3915 err = -EOPNOTSUPP;
3916 }
3917
3918 free_irq(sp->entries[1].vector, sp);
3919
3920 writeq(saved64, &bar0->scheduled_int_ctrl);
3921
3922 return err;
3923 }
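/*
 * Note: wait_event_timeout() returns 0 when the condition is still
 * false after the timeout (HZ/10, roughly 100 ms), so msi_detected
 * doubles as the pass/fail flag for the self-test above.
 */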
3924
3925 static void remove_msix_isr(struct s2io_nic *sp)
3926 {
3927 int i;
3928 u16 msi_control;
3929
3930 for (i = 0; i < sp->num_entries; i++) {
3931 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3932 int vector = sp->entries[i].vector;
3933 void *arg = sp->s2io_entries[i].arg;
3934 free_irq(vector, arg);
3935 }
3936 }
3937
3938 kfree(sp->entries);
3939 kfree(sp->s2io_entries);
3940 sp->entries = NULL;
3941 sp->s2io_entries = NULL;
3942
3943 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3944 msi_control &= 0xFFFE; /* Disable MSI */
3945 pci_write_config_word(sp->pdev, 0x42, msi_control);
3946
3947 pci_disable_msix(sp->pdev);
3948 }
3949
3950 static void remove_inta_isr(struct s2io_nic *sp)
3951 {
3952 struct net_device *dev = sp->dev;
3953
3954 free_irq(sp->pdev->irq, dev);
3955 }
3956
3957 /* ********************************************************* *
3958 * Functions defined below concern the OS part of the driver *
3959 * ********************************************************* */
3960
3961 /**
3962 * s2io_open - open entry point of the driver
3963 * @dev : pointer to the device structure.
3964 * Description:
3965 * This function is the open entry point of the driver. It mainly calls a
3966 * function to allocate Rx buffers and inserts them into the buffer
3967 * descriptors and then enables the Rx part of the NIC.
3968 * Return value:
3969 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3970 * file on failure.
3971 */
3972
3973 static int s2io_open(struct net_device *dev)
3974 {
3975 struct s2io_nic *sp = netdev_priv(dev);
3976 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3977 int err = 0;
3978
3979 /*
3980 * Make sure you have link off by default every time
3981 * Nic is initialized
3982 */
3983 netif_carrier_off(dev);
3984 sp->last_link_state = 0;
3985
3986 /* Initialize H/W and enable interrupts */
3987 err = s2io_card_up(sp);
3988 if (err) {
3989 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3990 dev->name);
3991 goto hw_init_failed;
3992 }
3993
3994 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3995 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3996 s2io_card_down(sp);
3997 err = -ENODEV;
3998 goto hw_init_failed;
3999 }
4000 s2io_start_all_tx_queue(sp);
4001 return 0;
4002
4003 hw_init_failed:
4004 if (sp->config.intr_type == MSI_X) {
4005 if (sp->entries) {
4006 kfree(sp->entries);
4007 swstats->mem_freed += sp->num_entries *
4008 sizeof(struct msix_entry);
4009 }
4010 if (sp->s2io_entries) {
4011 kfree(sp->s2io_entries);
4012 swstats->mem_freed += sp->num_entries *
4013 sizeof(struct s2io_msix_entry);
4014 }
4015 }
4016 return err;
4017 }
4018
4019 /**
4020 * s2io_close -close entry point of the driver
4021 * @dev : device pointer.
4022 * Description:
4023 * This is the stop entry point of the driver. It needs to undo exactly
4024 * whatever was done by the open entry point, thus it's usually referred to
4025 * as the close function. Among other things, this function mainly stops the
4026 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4027 * Return value:
4028 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4029 * file on failure.
4030 */
4031
4032 static int s2io_close(struct net_device *dev)
4033 {
4034 struct s2io_nic *sp = netdev_priv(dev);
4035 struct config_param *config = &sp->config;
4036 u64 tmp64;
4037 int offset;
4038
4039 /* Return if the device is already closed.
4040 * Can happen when s2io_card_up failed in change_mtu.
4041 */
4042 if (!is_s2io_card_up(sp))
4043 return 0;
4044
4045 s2io_stop_all_tx_queue(sp);
4046 /* delete all populated mac entries */
4047 for (offset = 1; offset < config->max_mc_addr; offset++) {
4048 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4049 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4050 do_s2io_delete_unicast_mc(sp, tmp64);
4051 }
4052
4053 s2io_card_down(sp);
4054
4055 return 0;
4056 }
4057
4058 /**
4059 * s2io_xmit - Tx entry point of the driver
4060 * @skb : the socket buffer containing the Tx data.
4061 * @dev : device pointer.
4062 * Description :
4063 * This function is the Tx entry point of the driver. S2IO NIC supports
4064 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4065 * NOTE: when the device can't queue the packet, only the trans_start
4066 * variable will not be updated.
4067 * Return value:
4068 * NETDEV_TX_OK on success; NETDEV_TX_BUSY/NETDEV_TX_LOCKED on failure.
4069 */
4070
4071 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4072 {
4073 struct s2io_nic *sp = netdev_priv(dev);
4074 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4075 register u64 val64;
4076 struct TxD *txdp;
4077 struct TxFIFO_element __iomem *tx_fifo;
4078 unsigned long flags = 0;
4079 u16 vlan_tag = 0;
4080 struct fifo_info *fifo = NULL;
4081 int do_spin_lock = 1;
4082 int offload_type;
4083 int enable_per_list_interrupt = 0;
4084 struct config_param *config = &sp->config;
4085 struct mac_info *mac_control = &sp->mac_control;
4086 struct stat_block *stats = mac_control->stats_info;
4087 struct swStat *swstats = &stats->sw_stat;
4088
4089 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4090
4091 if (unlikely(skb->len <= 0)) {
4092 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4093 dev_kfree_skb_any(skb);
4094 return NETDEV_TX_OK;
4095 }
4096
4097 if (!is_s2io_card_up(sp)) {
4098 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4099 dev->name);
4100 dev_kfree_skb(skb);
4101 return NETDEV_TX_OK;
4102 }
4103
4104 queue = 0;
4105 if (vlan_tx_tag_present(skb))
4106 vlan_tag = vlan_tx_tag_get(skb);
4107 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4108 if (skb->protocol == htons(ETH_P_IP)) {
4109 struct iphdr *ip;
4110 struct tcphdr *th;
4111 ip = ip_hdr(skb);
4112
4113 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4114 th = (struct tcphdr *)(((unsigned char *)ip) +
4115 ip->ihl*4);
4116
4117 if (ip->protocol == IPPROTO_TCP) {
4118 queue_len = sp->total_tcp_fifos;
4119 queue = (ntohs(th->source) +
4120 ntohs(th->dest)) &
4121 sp->fifo_selector[queue_len - 1];
4122 if (queue >= queue_len)
4123 queue = queue_len - 1;
4124 } else if (ip->protocol == IPPROTO_UDP) {
4125 queue_len = sp->total_udp_fifos;
4126 queue = (ntohs(th->source) +
4127 ntohs(th->dest)) &
4128 sp->fifo_selector[queue_len - 1];
4129 if (queue >= queue_len)
4130 queue = queue_len - 1;
4131 queue += sp->udp_fifo_idx;
4132 if (skb->len > 1024)
4133 enable_per_list_interrupt = 1;
4134 do_spin_lock = 0;
4135 }
4136 }
4137 }
4138 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4139 /* get fifo number based on skb->priority value */
4140 queue = config->fifo_mapping
4141 [skb->priority & (MAX_TX_FIFOS - 1)];
4142 fifo = &mac_control->fifos[queue];
4143
4144 if (do_spin_lock)
4145 spin_lock_irqsave(&fifo->tx_lock, flags);
4146 else {
4147 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4148 return NETDEV_TX_LOCKED;
4149 }
4150
4151 if (sp->config.multiq) {
4152 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4153 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4154 return NETDEV_TX_BUSY;
4155 }
4156 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4157 if (netif_queue_stopped(dev)) {
4158 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4159 return NETDEV_TX_BUSY;
4160 }
4161 }
4162
4163 put_off = (u16)fifo->tx_curr_put_info.offset;
4164 get_off = (u16)fifo->tx_curr_get_info.offset;
4165 txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
4166
4167 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4168 /* Avoid "put" pointer going beyond "get" pointer */
4169 if (txdp->Host_Control ||
4170 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4171 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4172 s2io_stop_tx_queue(sp, fifo->fifo_no);
4173 dev_kfree_skb(skb);
4174 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4175 return NETDEV_TX_OK;
4176 }
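/*
 * Note: the check above is the classic "keep one slot empty"
 * circular-buffer full test. E.g. with queue_len = 8, put_off = 7 and
 * get_off = 0, (put_off + 1) wraps to 0 == get_off, so the FIFO is
 * treated as full even though one descriptor is still unused.
 */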
4177
4178 offload_type = s2io_offload_type(skb);
4179 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4180 txdp->Control_1 |= TXD_TCP_LSO_EN;
4181 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4182 }
4183 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4184 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4185 TXD_TX_CKO_TCP_EN |
4186 TXD_TX_CKO_UDP_EN);
4187 }
4188 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4189 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4190 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4191 if (enable_per_list_interrupt)
4192 if (put_off & (queue_len >> 5))
4193 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4194 if (vlan_tag) {
4195 txdp->Control_2 |= TXD_VLAN_ENABLE;
4196 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4197 }
4198
4199 frg_len = skb_headlen(skb);
4200 if (offload_type == SKB_GSO_UDP) {
4201 int ufo_size;
4202
4203 ufo_size = s2io_udp_mss(skb);
4204 ufo_size &= ~7;
4205 txdp->Control_1 |= TXD_UFO_EN;
4206 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4207 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4208 #ifdef __BIG_ENDIAN
4209 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4210 fifo->ufo_in_band_v[put_off] =
4211 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4212 #else
4213 fifo->ufo_in_band_v[put_off] =
4214 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4215 #endif
4216 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4217 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4218 fifo->ufo_in_band_v,
4219 sizeof(u64),
4220 PCI_DMA_TODEVICE);
4221 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4222 goto pci_map_failed;
4223 txdp++;
4224 }
4225
4226 txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4227 frg_len, PCI_DMA_TODEVICE);
4228 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4229 goto pci_map_failed;
4230
4231 txdp->Host_Control = (unsigned long)skb;
4232 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4233 if (offload_type == SKB_GSO_UDP)
4234 txdp->Control_1 |= TXD_UFO_EN;
4235
4236 frg_cnt = skb_shinfo(skb)->nr_frags;
4237 /* For fragmented SKB. */
4238 for (i = 0; i < frg_cnt; i++) {
4239 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4240 /* A '0' length fragment will be ignored */
4241 if (!frag->size)
4242 continue;
4243 txdp++;
4244 txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
4245 frag->page_offset,
4246 frag->size,
4247 PCI_DMA_TODEVICE);
4248 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4249 if (offload_type == SKB_GSO_UDP)
4250 txdp->Control_1 |= TXD_UFO_EN;
4251 }
4252 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4253
4254 if (offload_type == SKB_GSO_UDP)
4255 frg_cnt++; /* as Txd0 was used for inband header */
4256
4257 tx_fifo = mac_control->tx_FIFO_start[queue];
4258 val64 = fifo->list_info[put_off].list_phy_addr;
4259 writeq(val64, &tx_fifo->TxDL_Pointer);
4260
4261 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4262 TX_FIFO_LAST_LIST);
4263 if (offload_type)
4264 val64 |= TX_FIFO_SPECIAL_FUNC;
4265
4266 writeq(val64, &tx_fifo->List_Control);
4267
4268 mmiowb();
4269
4270 put_off++;
4271 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4272 put_off = 0;
4273 fifo->tx_curr_put_info.offset = put_off;
4274
4275 /* Avoid "put" pointer going beyond "get" pointer */
4276 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4277 swstats->fifo_full_cnt++;
4278 DBG_PRINT(TX_DBG,
4279 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4280 put_off, get_off);
4281 s2io_stop_tx_queue(sp, fifo->fifo_no);
4282 }
4283 swstats->mem_allocated += skb->truesize;
4284 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4285
4286 if (sp->config.intr_type == MSI_X)
4287 tx_intr_handler(fifo);
4288
4289 return NETDEV_TX_OK;
4290
4291 pci_map_failed:
4292 swstats->pci_map_fail_cnt++;
4293 s2io_stop_tx_queue(sp, fifo->fifo_no);
4294 swstats->mem_freed += skb->truesize;
4295 dev_kfree_skb(skb);
4296 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4297 return NETDEV_TX_OK;
4298 }
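/*
 * A minimal sketch (illustration, not driver code) of the default Tx
 * steering computed near the top of s2io_xmit(): the FIFO is chosen by
 * summing the TCP/UDP ports and masking with fifo_selector[], which
 * for a power-of-two FIFO count appears to be count - 1. With 4 TCP
 * FIFOs and hypothetical ports:
 *
 *	u16 sport = 80, dport = 34567;
 *	u16 queue = (sport + dport) & 3;	// fifo_selector[3] == 3
 *	// 80 + 34567 = 34647; 34647 & 3 = 3 -> FIFO 3
 */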
4299
4300 static void
4301 s2io_alarm_handle(unsigned long data)
4302 {
4303 struct s2io_nic *sp = (struct s2io_nic *)data;
4304 struct net_device *dev = sp->dev;
4305
4306 s2io_handle_errors(dev);
4307 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4308 }
4309
4310 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4311 {
4312 struct ring_info *ring = (struct ring_info *)dev_id;
4313 struct s2io_nic *sp = ring->nic;
4314 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4315
4316 if (unlikely(!is_s2io_card_up(sp)))
4317 return IRQ_HANDLED;
4318
4319 if (sp->config.napi) {
4320 u8 __iomem *addr = NULL;
4321 u8 val8 = 0;
4322
4323 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4324 addr += (7 - ring->ring_no);
4325 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4326 writeb(val8, addr);
4327 val8 = readb(addr);
4328 napi_schedule(&ring->napi);
4329 } else {
4330 rx_intr_handler(ring, 0);
4331 s2io_chk_rx_buffers(sp, ring);
4332 }
4333
4334 return IRQ_HANDLED;
4335 }
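/*
 * Note on the NAPI branch above: xmsi_mask_reg is addressed as eight
 * per-vector mask bytes, most significant first (hence the
 * 7 - ring->ring_no offset). The writeb() appears to mask this ring's
 * vector until the poll completes, and the readb() straight after it
 * is the usual flush of a posted PCI write.
 */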
4336
4337 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4338 {
4339 int i;
4340 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4341 struct s2io_nic *sp = fifos->nic;
4342 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4343 struct config_param *config = &sp->config;
4344 u64 reason;
4345
4346 if (unlikely(!is_s2io_card_up(sp)))
4347 return IRQ_NONE;
4348
4349 reason = readq(&bar0->general_int_status);
4350 if (unlikely(reason == S2IO_MINUS_ONE))
4351 /* Nothing much can be done. Get out */
4352 return IRQ_HANDLED;
4353
4354 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4355 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4356
4357 if (reason & GEN_INTR_TXPIC)
4358 s2io_txpic_intr_handle(sp);
4359
4360 if (reason & GEN_INTR_TXTRAFFIC)
4361 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4362
4363 for (i = 0; i < config->tx_fifo_num; i++)
4364 tx_intr_handler(&fifos[i]);
4365
4366 writeq(sp->general_int_mask, &bar0->general_int_mask);
4367 readl(&bar0->general_int_status);
4368 return IRQ_HANDLED;
4369 }
4370 /* The interrupt was not raised by us */
4371 return IRQ_NONE;
4372 }
4373
4374 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4375 {
4376 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4377 u64 val64;
4378
4379 val64 = readq(&bar0->pic_int_status);
4380 if (val64 & PIC_INT_GPIO) {
4381 val64 = readq(&bar0->gpio_int_reg);
4382 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4383 (val64 & GPIO_INT_REG_LINK_UP)) {
4384 /*
4385 * This is an unstable state, so clear both up/down
4386 * interrupts and let the adapter re-evaluate the link state.
4387 */
4388 val64 |= GPIO_INT_REG_LINK_DOWN;
4389 val64 |= GPIO_INT_REG_LINK_UP;
4390 writeq(val64, &bar0->gpio_int_reg);
4391 val64 = readq(&bar0->gpio_int_mask);
4392 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4393 GPIO_INT_MASK_LINK_DOWN);
4394 writeq(val64, &bar0->gpio_int_mask);
4395 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4396 val64 = readq(&bar0->adapter_status);
4397 /* Enable Adapter */
4398 val64 = readq(&bar0->adapter_control);
4399 val64 |= ADAPTER_CNTL_EN;
4400 writeq(val64, &bar0->adapter_control);
4401 val64 |= ADAPTER_LED_ON;
4402 writeq(val64, &bar0->adapter_control);
4403 if (!sp->device_enabled_once)
4404 sp->device_enabled_once = 1;
4405
4406 s2io_link(sp, LINK_UP);
4407 /*
4408 * unmask the link-down interrupt and mask the
4409 * link-up interrupt
4410 */
4411 val64 = readq(&bar0->gpio_int_mask);
4412 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4413 val64 |= GPIO_INT_MASK_LINK_UP;
4414 writeq(val64, &bar0->gpio_int_mask);
4415
4416 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4417 val64 = readq(&bar0->adapter_status);
4418 s2io_link(sp, LINK_DOWN);
4419 /* Link is down so unmask the link-up interrupt */
4420 val64 = readq(&bar0->gpio_int_mask);
4421 val64 &= ~GPIO_INT_MASK_LINK_UP;
4422 val64 |= GPIO_INT_MASK_LINK_DOWN;
4423 writeq(val64, &bar0->gpio_int_mask);
4424
4425 /* turn off LED */
4426 val64 = readq(&bar0->adapter_control);
4427 val64 = val64 & (~ADAPTER_LED_ON);
4428 writeq(val64, &bar0->adapter_control);
4429 }
4430 }
4431 val64 = readq(&bar0->gpio_int_mask);
4432 }
4433
4434 /**
4435 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4436 * @value: alarm bits
4437 * @addr: address value
4438 * @cnt: counter variable
4439 * Description: Check for alarm and increment the counter
4440 * Return Value:
4441 * 1 - if alarm bit set
4442 * 0 - if alarm bit is not set
4443 */
4444 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4445 unsigned long long *cnt)
4446 {
4447 u64 val64;
4448 val64 = readq(addr);
4449 if (val64 & value) {
4450 writeq(val64, addr);
4451 (*cnt)++;
4452 return 1;
4453 }
4454 return 0;
4455
4456 }
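/*
 * Note: the write-back of val64 above relies on the alarm registers
 * being write-1-to-clear (W1C): writing the value just read
 * acknowledges exactly the bits that were set. Typical use, as in
 * s2io_handle_errors() below:
 *
 *	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
 *				  &sw_stat->serious_err_cnt))
 *		goto reset;	// fatal alarm -> schedule a soft reset
 */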
4457
4458 /**
4459 * s2io_handle_errors - Xframe error indication handler
4460 * @nic: device private variable
4461 * Description: Handle alarms such as loss of link, single or
4462 * double ECC errors, critical and serious errors.
4463 * Return Value:
4464 * NONE
4465 */
4466 static void s2io_handle_errors(void *dev_id)
4467 {
4468 struct net_device *dev = (struct net_device *)dev_id;
4469 struct s2io_nic *sp = netdev_priv(dev);
4470 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4471 u64 temp64 = 0, val64 = 0;
4472 int i = 0;
4473
4474 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4475 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4476
4477 if (!is_s2io_card_up(sp))
4478 return;
4479
4480 if (pci_channel_offline(sp->pdev))
4481 return;
4482
4483 memset(&sw_stat->ring_full_cnt, 0,
4484 sizeof(sw_stat->ring_full_cnt));
4485
4486 /* Handling the XPAK counters update */
4487 if (stats->xpak_timer_count < 72000) {
4488 /* waiting for an hour */
4489 stats->xpak_timer_count++;
4490 } else {
4491 s2io_updt_xpak_counter(dev);
4492 /* reset the count to zero */
4493 stats->xpak_timer_count = 0;
4494 }
4495
4496 /* Handling link status change error Intr */
4497 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4498 val64 = readq(&bar0->mac_rmac_err_reg);
4499 writeq(val64, &bar0->mac_rmac_err_reg);
4500 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4501 schedule_work(&sp->set_link_task);
4502 }
4503
4504 /* In case of a serious error, the device will be Reset. */
4505 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4506 &sw_stat->serious_err_cnt))
4507 goto reset;
4508
4509 /* Check for data parity error */
4510 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4511 &sw_stat->parity_err_cnt))
4512 goto reset;
4513
4514 /* Check for ring full counter */
4515 if (sp->device_type == XFRAME_II_DEVICE) {
4516 val64 = readq(&bar0->ring_bump_counter1);
4517 for (i = 0; i < 4; i++) {
4518 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4519 temp64 >>= 64 - ((i+1)*16);
4520 sw_stat->ring_full_cnt[i] += temp64;
4521 }
4522
4523 val64 = readq(&bar0->ring_bump_counter2);
4524 for (i = 0; i < 4; i++) {
4525 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4526 temp64 >>= 64 - ((i+1)*16);
4527 sw_stat->ring_full_cnt[i+4] += temp64;
4528 }
4529 }
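/*
 * Note: each 64-bit bump counter packs four 16-bit ring counters, most
 * significant first (vBIT() counts bit positions from the MSB). For
 * i = 1 the mask selects bits 47:32 and the shift of
 * 64 - ((1 + 1) * 16) = 32 moves that field down to bits 15:0.
 */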
4530
4531 val64 = readq(&bar0->txdma_int_status);
4532 /*check for pfc_err*/
4533 if (val64 & TXDMA_PFC_INT) {
4534 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4535 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4536 PFC_PCIX_ERR,
4537 &bar0->pfc_err_reg,
4538 &sw_stat->pfc_err_cnt))
4539 goto reset;
4540 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4541 &bar0->pfc_err_reg,
4542 &sw_stat->pfc_err_cnt);
4543 }
4544
4545 /*check for tda_err*/
4546 if (val64 & TXDMA_TDA_INT) {
4547 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4548 TDA_SM0_ERR_ALARM |
4549 TDA_SM1_ERR_ALARM,
4550 &bar0->tda_err_reg,
4551 &sw_stat->tda_err_cnt))
4552 goto reset;
4553 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4554 &bar0->tda_err_reg,
4555 &sw_stat->tda_err_cnt);
4556 }
4557 /*check for pcc_err*/
4558 if (val64 & TXDMA_PCC_INT) {
4559 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4560 PCC_N_SERR | PCC_6_COF_OV_ERR |
4561 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4562 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4563 PCC_TXB_ECC_DB_ERR,
4564 &bar0->pcc_err_reg,
4565 &sw_stat->pcc_err_cnt))
4566 goto reset;
4567 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4568 &bar0->pcc_err_reg,
4569 &sw_stat->pcc_err_cnt);
4570 }
4571
4572 /*check for tti_err*/
4573 if (val64 & TXDMA_TTI_INT) {
4574 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4575 &bar0->tti_err_reg,
4576 &sw_stat->tti_err_cnt))
4577 goto reset;
4578 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4579 &bar0->tti_err_reg,
4580 &sw_stat->tti_err_cnt);
4581 }
4582
4583 /*check for lso_err*/
4584 if (val64 & TXDMA_LSO_INT) {
4585 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4586 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4587 &bar0->lso_err_reg,
4588 &sw_stat->lso_err_cnt))
4589 goto reset;
4590 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4591 &bar0->lso_err_reg,
4592 &sw_stat->lso_err_cnt);
4593 }
4594
4595 /*check for tpa_err*/
4596 if (val64 & TXDMA_TPA_INT) {
4597 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4598 &bar0->tpa_err_reg,
4599 &sw_stat->tpa_err_cnt))
4600 goto reset;
4601 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4602 &bar0->tpa_err_reg,
4603 &sw_stat->tpa_err_cnt);
4604 }
4605
4606 /*check for sm_err*/
4607 if (val64 & TXDMA_SM_INT) {
4608 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4609 &bar0->sm_err_reg,
4610 &sw_stat->sm_err_cnt))
4611 goto reset;
4612 }
4613
4614 val64 = readq(&bar0->mac_int_status);
4615 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4616 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4617 &bar0->mac_tmac_err_reg,
4618 &sw_stat->mac_tmac_err_cnt))
4619 goto reset;
4620 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4621 TMAC_DESC_ECC_SG_ERR |
4622 TMAC_DESC_ECC_DB_ERR,
4623 &bar0->mac_tmac_err_reg,
4624 &sw_stat->mac_tmac_err_cnt);
4625 }
4626
4627 val64 = readq(&bar0->xgxs_int_status);
4628 if (val64 & XGXS_INT_STATUS_TXGXS) {
4629 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4630 &bar0->xgxs_txgxs_err_reg,
4631 &sw_stat->xgxs_txgxs_err_cnt))
4632 goto reset;
4633 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4634 &bar0->xgxs_txgxs_err_reg,
4635 &sw_stat->xgxs_txgxs_err_cnt);
4636 }
4637
4638 val64 = readq(&bar0->rxdma_int_status);
4639 if (val64 & RXDMA_INT_RC_INT_M) {
4640 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4641 RC_FTC_ECC_DB_ERR |
4642 RC_PRCn_SM_ERR_ALARM |
4643 RC_FTC_SM_ERR_ALARM,
4644 &bar0->rc_err_reg,
4645 &sw_stat->rc_err_cnt))
4646 goto reset;
4647 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4648 RC_FTC_ECC_SG_ERR |
4649 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4650 &sw_stat->rc_err_cnt);
4651 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4652 PRC_PCI_AB_WR_Rn |
4653 PRC_PCI_AB_F_WR_Rn,
4654 &bar0->prc_pcix_err_reg,
4655 &sw_stat->prc_pcix_err_cnt))
4656 goto reset;
4657 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4658 PRC_PCI_DP_WR_Rn |
4659 PRC_PCI_DP_F_WR_Rn,
4660 &bar0->prc_pcix_err_reg,
4661 &sw_stat->prc_pcix_err_cnt);
4662 }
4663
4664 if (val64 & RXDMA_INT_RPA_INT_M) {
4665 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4666 &bar0->rpa_err_reg,
4667 &sw_stat->rpa_err_cnt))
4668 goto reset;
4669 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4670 &bar0->rpa_err_reg,
4671 &sw_stat->rpa_err_cnt);
4672 }
4673
4674 if (val64 & RXDMA_INT_RDA_INT_M) {
4675 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4676 RDA_FRM_ECC_DB_N_AERR |
4677 RDA_SM1_ERR_ALARM |
4678 RDA_SM0_ERR_ALARM |
4679 RDA_RXD_ECC_DB_SERR,
4680 &bar0->rda_err_reg,
4681 &sw_stat->rda_err_cnt))
4682 goto reset;
4683 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4684 RDA_FRM_ECC_SG_ERR |
4685 RDA_MISC_ERR |
4686 RDA_PCIX_ERR,
4687 &bar0->rda_err_reg,
4688 &sw_stat->rda_err_cnt);
4689 }
4690
4691 if (val64 & RXDMA_INT_RTI_INT_M) {
4692 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4693 &bar0->rti_err_reg,
4694 &sw_stat->rti_err_cnt))
4695 goto reset;
4696 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4697 &bar0->rti_err_reg,
4698 &sw_stat->rti_err_cnt);
4699 }
4700
4701 val64 = readq(&bar0->mac_int_status);
4702 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4703 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4704 &bar0->mac_rmac_err_reg,
4705 &sw_stat->mac_rmac_err_cnt))
4706 goto reset;
4707 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4708 RMAC_SINGLE_ECC_ERR |
4709 RMAC_DOUBLE_ECC_ERR,
4710 &bar0->mac_rmac_err_reg,
4711 &sw_stat->mac_rmac_err_cnt);
4712 }
4713
4714 val64 = readq(&bar0->xgxs_int_status);
4715 if (val64 & XGXS_INT_STATUS_RXGXS) {
4716 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4717 &bar0->xgxs_rxgxs_err_reg,
4718 &sw_stat->xgxs_rxgxs_err_cnt))
4719 goto reset;
4720 }
4721
4722 val64 = readq(&bar0->mc_int_status);
4723 if (val64 & MC_INT_STATUS_MC_INT) {
4724 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4725 &bar0->mc_err_reg,
4726 &sw_stat->mc_err_cnt))
4727 goto reset;
4728
4729 /* Handling Ecc errors */
4730 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4731 writeq(val64, &bar0->mc_err_reg);
4732 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4733 sw_stat->double_ecc_errs++;
4734 if (sp->device_type != XFRAME_II_DEVICE) {
4735 /*
4736 * Reset XframeI only if critical error
4737 */
4738 if (val64 &
4739 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4740 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4741 goto reset;
4742 }
4743 } else
4744 sw_stat->single_ecc_errs++;
4745 }
4746 }
4747 return;
4748
4749 reset:
4750 s2io_stop_all_tx_queue(sp);
4751 schedule_work(&sp->rst_timer_task);
4752 sw_stat->soft_reset_cnt++;
4753 }
4754
4755 /**
4756 * s2io_isr - ISR handler of the device.
4757 * @irq: the irq of the device.
4758 * @dev_id: a void pointer to the dev structure of the NIC.
4759 * Description: This function is the ISR handler of the device. It
4760 * identifies the reason for the interrupt and calls the relevant
4761 * service routines. As a contingency measure, this ISR allocates the
4762 * recv buffers, if their numbers are below the panic value which is
4763 * presently set to 25% of the original number of rcv buffers allocated.
4764 * Return value:
4765 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4766 * IRQ_NONE: will be returned if interrupt is not from our device
4767 */
4768 static irqreturn_t s2io_isr(int irq, void *dev_id)
4769 {
4770 struct net_device *dev = (struct net_device *)dev_id;
4771 struct s2io_nic *sp = netdev_priv(dev);
4772 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4773 int i;
4774 u64 reason = 0;
4775 struct mac_info *mac_control;
4776 struct config_param *config;
4777
4778 /* Pretend we handled any irq's from a disconnected card */
4779 if (pci_channel_offline(sp->pdev))
4780 return IRQ_NONE;
4781
4782 if (!is_s2io_card_up(sp))
4783 return IRQ_NONE;
4784
4785 config = &sp->config;
4786 mac_control = &sp->mac_control;
4787
4788 /*
4789 * Identify the cause for interrupt and call the appropriate
4790 * interrupt handler. Causes for the interrupt could be;
4791 * 1. Rx of packet.
4792 * 2. Tx complete.
4793 * 3. Link down.
4794 */
4795 reason = readq(&bar0->general_int_status);
4796
4797 if (unlikely(reason == S2IO_MINUS_ONE))
4798 return IRQ_HANDLED; /* Nothing much can be done. Get out */
4799
4800 if (reason &
4801 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4802 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4803
4804 if (config->napi) {
4805 if (reason & GEN_INTR_RXTRAFFIC) {
4806 napi_schedule(&sp->napi);
4807 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4808 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4809 readl(&bar0->rx_traffic_int);
4810 }
4811 } else {
4812 /*
4813 * rx_traffic_int reg is an R1 register, writing all 1's
4814 * will ensure that the actual interrupt causing bit
4815 * gets cleared and hence a read can be avoided.
4816 */
4817 if (reason & GEN_INTR_RXTRAFFIC)
4818 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4819
4820 for (i = 0; i < config->rx_ring_num; i++) {
4821 struct ring_info *ring = &mac_control->rings[i];
4822
4823 rx_intr_handler(ring, 0);
4824 }
4825 }
4826
4827 /*
4828 * tx_traffic_int reg is an R1 register, writing all 1's
4829 * will ensure that the actual interrupt causing bit gets
4830 * cleared and hence a read can be avoided.
4831 */
4832 if (reason & GEN_INTR_TXTRAFFIC)
4833 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4834
4835 for (i = 0; i < config->tx_fifo_num; i++)
4836 tx_intr_handler(&mac_control->fifos[i]);
4837
4838 if (reason & GEN_INTR_TXPIC)
4839 s2io_txpic_intr_handle(sp);
4840
4841 /*
4842 * Reallocate the buffers from the interrupt handler itself.
4843 */
4844 if (!config->napi) {
4845 for (i = 0; i < config->rx_ring_num; i++) {
4846 struct ring_info *ring = &mac_control->rings[i];
4847
4848 s2io_chk_rx_buffers(sp, ring);
4849 }
4850 }
4851 writeq(sp->general_int_mask, &bar0->general_int_mask);
4852 readl(&bar0->general_int_status);
4853
4854 return IRQ_HANDLED;
4855
4856 } else if (!reason) {
4857 /* The interrupt was not raised by us */
4858 return IRQ_NONE;
4859 }
4860
4861 return IRQ_HANDLED;
4862 }
4863
4864 /**
4865 * s2io_updt_stats - trigger an immediate hardware statistics update
4866 */
4867 static void s2io_updt_stats(struct s2io_nic *sp)
4868 {
4869 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4870 u64 val64;
4871 int cnt = 0;
4872
4873 if (is_s2io_card_up(sp)) {
4874 /* Approx. 30us on a 133 MHz bus */
4875 val64 = SET_UPDT_CLICKS(10) |
4876 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4877 writeq(val64, &bar0->stat_cfg);
4878 do {
4879 udelay(100);
4880 val64 = readq(&bar0->stat_cfg);
4881 if (!(val64 & s2BIT(0)))
4882 break;
4883 cnt++;
4884 if (cnt == 5)
4885 break; /* Updt failed */
4886 } while (1);
4887 }
4888 }
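/*
 * Note: the one-shot update requested above is complete once the
 * hardware clears s2BIT(0) of stat_cfg (the driver's bit macros count
 * from the MSB); the loop polls at 100 us intervals and gives up after
 * five tries, i.e. roughly 500 us.
 */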
4889
4890 /**
4891 * s2io_get_stats - Updates the device statistics structure.
4892 * @dev : pointer to the device structure.
4893 * Description:
4894 * This function updates the device statistics structure in the s2io_nic
4895 * structure and returns a pointer to the same.
4896 * Return value:
4897 * pointer to the updated net_device_stats structure.
4898 */
4899 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4900 {
4901 struct s2io_nic *sp = netdev_priv(dev);
4902 struct mac_info *mac_control = &sp->mac_control;
4903 struct stat_block *stats = mac_control->stats_info;
4904 u64 delta;
4905
4906 /* Configure stats for an immediate update */
4907 s2io_updt_stats(sp);
4908
4909 /* A device reset will cause the on-adapter statistics to be zero'ed.
4910 * This can be done while running by changing the MTU. To prevent the
4911 * system from having the stats zero'ed, the driver keeps a copy of the
4912 * last update to the system (which is also zero'ed on reset). This
4913 * enables the driver to accurately know the delta between the last
4914 * update and the current update.
4915 */
4916 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4917 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4918 sp->stats.rx_packets += delta;
4919 dev->stats.rx_packets += delta;
4920
4921 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4922 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4923 sp->stats.tx_packets += delta;
4924 dev->stats.tx_packets += delta;
4925
4926 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4927 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4928 sp->stats.rx_bytes += delta;
4929 dev->stats.rx_bytes += delta;
4930
4931 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4932 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4933 sp->stats.tx_bytes += delta;
4934 dev->stats.tx_bytes += delta;
4935
4936 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4937 sp->stats.rx_errors += delta;
4938 dev->stats.rx_errors += delta;
4939
4940 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4941 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4942 sp->stats.tx_errors += delta;
4943 dev->stats.tx_errors += delta;
4944
4945 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4946 sp->stats.rx_dropped += delta;
4947 dev->stats.rx_dropped += delta;
4948
4949 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4950 sp->stats.tx_dropped += delta;
4951 dev->stats.tx_dropped += delta;
4952
4953 /* The adapter MAC interprets pause frames as multicast packets, but
4954 * does not pass them up. This erroneously increases the multicast
4955 * packet count and needs to be deducted when the multicast frame count
4956 * is queried.
4957 */
4958 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4959 le32_to_cpu(stats->rmac_vld_mcst_frms);
4960 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4961 delta -= sp->stats.multicast;
4962 sp->stats.multicast += delta;
4963 dev->stats.multicast += delta;
4964
4965 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4966 le32_to_cpu(stats->rmac_usized_frms)) +
4967 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4968 sp->stats.rx_length_errors += delta;
4969 dev->stats.rx_length_errors += delta;
4970
4971 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4972 sp->stats.rx_crc_errors += delta;
4973 dev->stats.rx_crc_errors += delta;
4974
4975 return &dev->stats;
4976 }
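/*
 * A minimal sketch (illustration only) of the idiom repeated above:
 * the adapter exposes a 32-bit counter plus a 32-bit overflow word,
 * and the driver tracks deltas against its own snapshot so an adapter
 * reset (which zeroes the hardware counters) does not zero what the
 * OS sees:
 *
 *	u64 hw = ((u64)le32_to_cpu(oflow) << 32) | le32_to_cpu(lo);
 *	u64 delta = hw - sp->stats.rx_packets;	// change since last query
 *	sp->stats.rx_packets += delta;		// snapshot == hw again
 *	dev->stats.rx_packets += delta;		// what the OS reports
 */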
4977
4978 /**
4979 * s2io_set_multicast - entry point for multicast address enable/disable.
4980 * @dev : pointer to the device structure
4981 * Description:
4982 * This function is a driver entry point which gets called by the kernel
4983 * whenever multicast addresses must be enabled/disabled. This also gets
4984 * called to set/reset promiscuous mode. Depending on the device flags, we
4985 * determine whether multicast addresses must be enabled or promiscuous mode
4986 * is to be disabled, etc.
4987 * Return value:
4988 * void.
4989 */
4990
4991 static void s2io_set_multicast(struct net_device *dev)
4992 {
4993 int i, j, prev_cnt;
4994 struct netdev_hw_addr *ha;
4995 struct s2io_nic *sp = netdev_priv(dev);
4996 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4997 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4998 0xfeffffffffffULL;
4999 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
5000 void __iomem *add;
5001 struct config_param *config = &sp->config;
5002
5003 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
5004 /* Enable all Multicast addresses */
5005 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
5006 &bar0->rmac_addr_data0_mem);
5007 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
5008 &bar0->rmac_addr_data1_mem);
5009 val64 = RMAC_ADDR_CMD_MEM_WE |
5010 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5011 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
5012 writeq(val64, &bar0->rmac_addr_cmd_mem);
5013 /* Wait till command completes */
5014 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5015 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5016 S2IO_BIT_RESET);
5017
5018 sp->m_cast_flg = 1;
5019 sp->all_multi_pos = config->max_mc_addr - 1;
5020 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
5021 /* Disable all Multicast addresses */
5022 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5023 &bar0->rmac_addr_data0_mem);
5024 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
5025 &bar0->rmac_addr_data1_mem);
5026 val64 = RMAC_ADDR_CMD_MEM_WE |
5027 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5028 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
5029 writeq(val64, &bar0->rmac_addr_cmd_mem);
5030 /* Wait till command completes */
5031 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5032 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5033 S2IO_BIT_RESET);
5034
5035 sp->m_cast_flg = 0;
5036 sp->all_multi_pos = 0;
5037 }
5038
5039 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5040 /* Put the NIC into promiscuous mode */
5041 add = &bar0->mac_cfg;
5042 val64 = readq(&bar0->mac_cfg);
5043 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5044
5045 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5046 writel((u32)val64, add);
5047 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5048 writel((u32) (val64 >> 32), (add + 4));
5049
5050 if (vlan_tag_strip != 1) {
5051 val64 = readq(&bar0->rx_pa_cfg);
5052 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5053 writeq(val64, &bar0->rx_pa_cfg);
5054 sp->vlan_strip_flag = 0;
5055 }
5056
5057 val64 = readq(&bar0->mac_cfg);
5058 sp->promisc_flg = 1;
5059 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5060 dev->name);
5061 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5062 /* Remove the NIC from promiscuous mode */
5063 add = &bar0->mac_cfg;
5064 val64 = readq(&bar0->mac_cfg);
5065 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5066
5067 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5068 writel((u32)val64, add);
5069 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5070 writel((u32) (val64 >> 32), (add + 4));
5071
5072 if (vlan_tag_strip != 0) {
5073 val64 = readq(&bar0->rx_pa_cfg);
5074 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5075 writeq(val64, &bar0->rx_pa_cfg);
5076 sp->vlan_strip_flag = 1;
5077 }
5078
5079 val64 = readq(&bar0->mac_cfg);
5080 sp->promisc_flg = 0;
5081 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5082 }
5083
5084 /* Update individual M_CAST address list */
5085 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5086 if (netdev_mc_count(dev) >
5087 (config->max_mc_addr - config->max_mac_addr)) {
5088 DBG_PRINT(ERR_DBG,
5089 "%s: No more Rx filters can be added - "
5090 "please enable ALL_MULTI instead\n",
5091 dev->name);
5092 return;
5093 }
5094
5095 prev_cnt = sp->mc_addr_count;
5096 sp->mc_addr_count = netdev_mc_count(dev);
5097
5098 /* Clear out the previous list of Mc in the H/W. */
5099 for (i = 0; i < prev_cnt; i++) {
5100 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5101 &bar0->rmac_addr_data0_mem);
5102 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5103 &bar0->rmac_addr_data1_mem);
5104 val64 = RMAC_ADDR_CMD_MEM_WE |
5105 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5106 RMAC_ADDR_CMD_MEM_OFFSET
5107 (config->mc_start_offset + i);
5108 writeq(val64, &bar0->rmac_addr_cmd_mem);
5109
5110 /* Wait till command completes */
5111 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5112 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5113 S2IO_BIT_RESET)) {
5114 DBG_PRINT(ERR_DBG,
5115 "%s: Adding Multicasts failed\n",
5116 dev->name);
5117 return;
5118 }
5119 }
5120
5121 /* Create the new Rx filter list and update the same in H/W. */
5122 i = 0;
5123 netdev_for_each_mc_addr(ha, dev) {
5124 mac_addr = 0;
5125 for (j = 0; j < ETH_ALEN; j++) {
5126 mac_addr |= ha->addr[j];
5127 mac_addr <<= 8;
5128 }
5129 mac_addr >>= 8;
5130 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5131 &bar0->rmac_addr_data0_mem);
5132 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5133 &bar0->rmac_addr_data1_mem);
5134 val64 = RMAC_ADDR_CMD_MEM_WE |
5135 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5136 RMAC_ADDR_CMD_MEM_OFFSET
5137 (i + config->mc_start_offset);
5138 writeq(val64, &bar0->rmac_addr_cmd_mem);
5139
5140 /* Wait till command completes */
5141 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5142 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5143 S2IO_BIT_RESET)) {
5144 DBG_PRINT(ERR_DBG,
5145 "%s: Adding Multicasts failed\n",
5146 dev->name);
5147 return;
5148 }
5149 i++;
5150 }
5151 }
5152 }
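/*
 * Note: the CAM stores a MAC address as the low 48 bits of a u64 with
 * the first octet most significant, e.g. 00:11:22:33:44:55 packs to
 * 0x0000001122334455ULL - the same value the shift-then-or loop in
 * do_s2io_add_mc() below builds.
 */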
5153
5154 /* read unicast & multicast addresses from CAM and store them in
5155 * the def_mac_addr structure
5156 */
5157 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5158 {
5159 int offset;
5160 u64 mac_addr = 0x0;
5161 struct config_param *config = &sp->config;
5162
5163 /* store unicast & multicast mac addresses */
5164 for (offset = 0; offset < config->max_mc_addr; offset++) {
5165 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5166 /* if read fails disable the entry */
5167 if (mac_addr == FAILURE)
5168 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5169 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5170 }
5171 }
5172
5173 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5174 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5175 {
5176 int offset;
5177 struct config_param *config = &sp->config;
5178 /* restore unicast mac address */
5179 for (offset = 0; offset < config->max_mac_addr; offset++)
5180 do_s2io_prog_unicast(sp->dev,
5181 sp->def_mac_addr[offset].mac_addr);
5182
5183 /* restore multicast mac address */
5184 for (offset = config->mc_start_offset;
5185 offset < config->max_mc_addr; offset++)
5186 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5187 }
5188
5189 /* add a multicast MAC address to CAM */
5190 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5191 {
5192 int i;
5193 u64 mac_addr = 0;
5194 struct config_param *config = &sp->config;
5195
5196 for (i = 0; i < ETH_ALEN; i++) {
5197 mac_addr <<= 8;
5198 mac_addr |= addr[i];
5199 }
5200 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5201 return SUCCESS;
5202
5203 /* check if the multicast mac is already present in CAM */
5204 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5205 u64 tmp64;
5206 tmp64 = do_s2io_read_unicast_mc(sp, i);
5207 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5208 break;
5209
5210 if (tmp64 == mac_addr)
5211 return SUCCESS;
5212 }
5213 if (i == config->max_mc_addr) {
5214 DBG_PRINT(ERR_DBG,
5215 "CAM full no space left for multicast MAC\n");
5216 return FAILURE;
5217 }
5218 /* Update the internal structure with this new mac address */
5219 do_s2io_copy_mac_addr(sp, i, mac_addr);
5220
5221 return do_s2io_add_mac(sp, mac_addr, i);
5222 }
5223
5224 /* add MAC address to CAM */
5225 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5226 {
5227 u64 val64;
5228 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5229
5230 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5231 &bar0->rmac_addr_data0_mem);
5232
5233 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5234 RMAC_ADDR_CMD_MEM_OFFSET(off);
5235 writeq(val64, &bar0->rmac_addr_cmd_mem);
5236
5237 /* Wait till command completes */
5238 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5239 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5240 S2IO_BIT_RESET)) {
5241 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5242 return FAILURE;
5243 }
5244 return SUCCESS;
5245 }
5246 /* deletes a specified unicast/multicast mac entry from CAM */
5247 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5248 {
5249 int offset;
5250 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5251 struct config_param *config = &sp->config;
5252
5253 for (offset = 1;
5254 offset < config->max_mc_addr; offset++) {
5255 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5256 if (tmp64 == addr) {
5257 /* disable the entry by writing 0xffffffffffffULL */
5258 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5259 return FAILURE;
5260 /* store the new mac list from CAM */
5261 do_s2io_store_unicast_mc(sp);
5262 return SUCCESS;
5263 }
5264 }
5265 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5266 (unsigned long long)addr);
5267 return FAILURE;
5268 }
5269
5270 /* read mac entries from CAM */
5271 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5272 {
5273 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5274 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5275
5276 /* read mac addr */
5277 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5278 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5279 writeq(val64, &bar0->rmac_addr_cmd_mem);
5280
5281 /* Wait till command completes */
5282 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5283 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5284 S2IO_BIT_RESET)) {
5285 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5286 return FAILURE;
5287 }
5288 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5289
5290 return tmp64 >> 16;
5291 }
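/*
 * Note: rmac_addr_data0_mem appears to hold the 48-bit MAC in bits
 * 63:16 (see RMAC_ADDR_DATA0_MEM_ADDR() at the write sites), so the
 * final ">> 16" returns it in the same low-48-bit form that
 * do_s2io_add_mac() takes.
 */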
5292
5293 /**
5294 * s2io_set_mac_addr - driver entry point to set the MAC address
5295 */
5296
5297 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5298 {
5299 struct sockaddr *addr = p;
5300
5301 if (!is_valid_ether_addr(addr->sa_data))
5302 return -EINVAL;
5303
5304 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5305
5306 /* store the MAC address in CAM */
5307 return do_s2io_prog_unicast(dev, dev->dev_addr);
5308 }
5309 /**
5310 * do_s2io_prog_unicast - Programs the Xframe mac address
5311 * @dev : pointer to the device structure.
5312 * @addr: a uchar pointer to the new mac address which is to be set.
5313 * Description : This procedure will program the Xframe to receive
5314 * frames with the new MAC address
5315 * Return value: SUCCESS on success and FAILURE on failure.
5317 */
5318
5319 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5320 {
5321 struct s2io_nic *sp = netdev_priv(dev);
5322 register u64 mac_addr = 0, perm_addr = 0;
5323 int i;
5324 u64 tmp64;
5325 struct config_param *config = &sp->config;
5326
5327 /*
5328 * Set the new MAC address as the new unicast filter and reflect this
5329 * change on the device address registered with the OS. It will be
5330 * at offset 0.
5331 */
5332 for (i = 0; i < ETH_ALEN; i++) {
5333 mac_addr <<= 8;
5334 mac_addr |= addr[i];
5335 perm_addr <<= 8;
5336 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5337 }
5338
5339 /* check if the dev_addr is different from perm_addr */
5340 if (mac_addr == perm_addr)
5341 return SUCCESS;
5342
5343 /* check if the mac is already present in CAM */
5344 for (i = 1; i < config->max_mac_addr; i++) {
5345 tmp64 = do_s2io_read_unicast_mc(sp, i);
5346 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5347 break;
5348
5349 if (tmp64 == mac_addr) {
5350 DBG_PRINT(INFO_DBG,
5351 "MAC addr:0x%llx already present in CAM\n",
5352 (unsigned long long)mac_addr);
5353 return SUCCESS;
5354 }
5355 }
5356 if (i == config->max_mac_addr) {
5357 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5358 return FAILURE;
5359 }
5360 /* Update the internal structure with this new mac address */
5361 do_s2io_copy_mac_addr(sp, i, mac_addr);
5362
5363 return do_s2io_add_mac(sp, mac_addr, i);
5364 }
5365
5366 /**
5367 * s2io_ethtool_sset - Sets different link parameters.
5368 * @sp : private member of the device structure, a pointer to the s2io_nic structure.
5369 * @info: pointer to the structure with parameters given by ethtool to set
5370 * link information.
5371 * Description:
5372 * The function sets different link parameters provided by the user onto
5373 * the NIC.
5374 * Return value:
5375 * 0 on success.
5376 */
5377
5378 static int s2io_ethtool_sset(struct net_device *dev,
5379 struct ethtool_cmd *info)
5380 {
5381 struct s2io_nic *sp = netdev_priv(dev);
5382 if ((info->autoneg == AUTONEG_ENABLE) ||
5383 (info->speed != SPEED_10000) ||
5384 (info->duplex != DUPLEX_FULL))
5385 return -EINVAL;
5386 
5387 s2io_close(sp->dev);
5388 s2io_open(sp->dev);
5390
5391 return 0;
5392 }
5393
5394 /**
5395 * s2io_ethtool_gset - Return link specific information.
5396 * @sp : private member of the device structure, pointer to the
5397 * s2io_nic structure.
5398 * @info : pointer to the structure with parameters given by ethtool
5399 * to return link information.
5400 * Description:
5401 * Returns link-specific information like speed, duplex, etc. to ethtool.
5402 * Return value :
5403 * return 0 on success.
5404 */
5405
5406 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5407 {
5408 struct s2io_nic *sp = netdev_priv(dev);
5409 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5410 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
5411 info->port = PORT_FIBRE;
5412
5414 info->transceiver = XCVR_EXTERNAL;
5415
5416 if (netif_carrier_ok(sp->dev)) {
5417 info->speed = 10000;
5418 info->duplex = DUPLEX_FULL;
5419 } else {
5420 info->speed = -1;
5421 info->duplex = -1;
5422 }
5423
5424 info->autoneg = AUTONEG_DISABLE;
5425 return 0;
5426 }
5427
5428 /**
5429 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5430 * @sp : private member of the device structure, which is a pointer to the
5431 * s2io_nic structure.
5432 * @info : pointer to the structure with parameters given by ethtool to
5433 * return driver information.
5434 * Description:
5435 * Returns driver-specific information like name, version, etc. to ethtool.
5436 * Return value:
5437 * void
5438 */
5439
5440 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5441 struct ethtool_drvinfo *info)
5442 {
5443 struct s2io_nic *sp = netdev_priv(dev);
5444
5445 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5446 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5447 strncpy(info->fw_version, "", sizeof(info->fw_version));
5448 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5449 info->regdump_len = XENA_REG_SPACE;
5450 info->eedump_len = XENA_EEPROM_SPACE;
5451 }
5452
5453 /**
5454 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5455 * @sp: private member of the device structure, which is a pointer to the
5456 * s2io_nic structure.
5457 * @regs : pointer to the structure with parameters given by ethtool for
5458 * dumping the registers.
5459 * @space: the buffer into which all the registers are dumped.
5460 * Description:
5461 * Dumps the entire register space of xFrame NIC into the user given
5462 * buffer area.
5463 * Return value :
5464 * void.
5465 */
5466
5467 static void s2io_ethtool_gregs(struct net_device *dev,
5468 struct ethtool_regs *regs, void *space)
5469 {
5470 int i;
5471 u64 reg;
5472 u8 *reg_space = (u8 *)space;
5473 struct s2io_nic *sp = netdev_priv(dev);
5474
5475 regs->len = XENA_REG_SPACE;
5476 regs->version = sp->pdev->subsystem_device;
5477
5478 for (i = 0; i < regs->len; i += 8) {
5479 reg = readq(sp->bar0 + i);
5480 memcpy((reg_space + i), &reg, 8);
5481 }
5482 }
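
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * reading a single 64-bit register at an arbitrary offset follows the
 * same pattern as the dump loop above; the offset is assumed to be
 * 8-byte aligned and below XENA_REG_SPACE.
 */
static u64 __maybe_unused s2io_example_read_reg(struct s2io_nic *sp,
						unsigned int off)
{
	/* bar0 is the ioremap()ed BAR0 window; every register is 8 bytes */
	return readq(sp->bar0 + off);
}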
5483
5484 /*
5485 * s2io_set_led - control NIC led
5486 */
5487 static void s2io_set_led(struct s2io_nic *sp, bool on)
5488 {
5489 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5490 u16 subid = sp->pdev->subsystem_device;
5491 u64 val64;
5492
5493 if ((sp->device_type == XFRAME_II_DEVICE) ||
5494 ((subid & 0xFF) >= 0x07)) {
5495 val64 = readq(&bar0->gpio_control);
5496 if (on)
5497 val64 |= GPIO_CTRL_GPIO_0;
5498 else
5499 val64 &= ~GPIO_CTRL_GPIO_0;
5500
5501 writeq(val64, &bar0->gpio_control);
5502 } else {
5503 val64 = readq(&bar0->adapter_control);
5504 if (on)
5505 val64 |= ADAPTER_LED_ON;
5506 else
5507 val64 &= ~ADAPTER_LED_ON;
5508
5509 writeq(val64, &bar0->adapter_control);
5510 }
5511
5512 }
5513
5514 /**
5515 * s2io_ethtool_set_led - To physically identify the nic on the system.
5516 * @dev : network device
5517 * @state: led setting
5518 *
5519 * Description: Used to physically identify the NIC on the system.
5520 * The Link LED will blink for a time specified by the user for
5521 * identification.
5522 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5523 * identification is possible only if its link is up.
5524 */
5525
5526 static int s2io_ethtool_set_led(struct net_device *dev,
5527 enum ethtool_phys_id_state state)
5528 {
5529 struct s2io_nic *sp = netdev_priv(dev);
5530 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5531 u16 subid = sp->pdev->subsystem_device;
5532
5533 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5534 u64 val64 = readq(&bar0->adapter_control);
5535 if (!(val64 & ADAPTER_CNTL_EN)) {
5536 pr_err("Adapter Link down, cannot blink LED\n");
5537 return -EAGAIN;
5538 }
5539 }
5540
5541 switch (state) {
5542 case ETHTOOL_ID_ACTIVE:
5543 sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5544 return 1; /* cycle on/off once per second */
5545
5546 case ETHTOOL_ID_ON:
5547 s2io_set_led(sp, true);
5548 break;
5549
5550 case ETHTOOL_ID_OFF:
5551 s2io_set_led(sp, false);
5552 break;
5553
5554 case ETHTOOL_ID_INACTIVE:
5555 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5556 writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5557 }
5558
5559 return 0;
5560 }
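
/*
 * Illustrative note: "ethtool -p ethX 5" exercises the callback above.
 * ETHTOOL_ID_ACTIVE arrives once (returning 1 asks the ethtool core to
 * alternate ON/OFF roughly once per second), ETHTOOL_ID_ON/OFF toggle the
 * LED for the requested 5 seconds, and ETHTOOL_ID_INACTIVE restores the
 * saved gpio_control value on cards with faulty link indicators.
 */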
5561
5562 static void s2io_ethtool_gringparam(struct net_device *dev,
5563 struct ethtool_ringparam *ering)
5564 {
5565 struct s2io_nic *sp = netdev_priv(dev);
5566 int i, tx_desc_count = 0, rx_desc_count = 0;
5567
5568 if (sp->rxd_mode == RXD_MODE_1) {
5569 ering->rx_max_pending = MAX_RX_DESC_1;
5570 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5571 } else {
5572 ering->rx_max_pending = MAX_RX_DESC_2;
5573 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5574 }
5575
5576 ering->rx_mini_max_pending = 0;
5577 ering->tx_max_pending = MAX_TX_DESC;
5578
5579 for (i = 0; i < sp->config.rx_ring_num; i++)
5580 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5581 ering->rx_pending = rx_desc_count;
5582 ering->rx_jumbo_pending = rx_desc_count;
5583 ering->rx_mini_pending = 0;
5584
5585 for (i = 0; i < sp->config.tx_fifo_num; i++)
5586 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5587 ering->tx_pending = tx_desc_count;
5588 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5589 }
5590
5591 /**
5592 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5593 * @sp : private member of the device structure, which is a pointer to the
5594 * s2io_nic structure.
5595 * @ep : pointer to the structure with pause parameters given by ethtool.
5596 * Description:
5597 * Returns the Pause frame generation and reception capability of the NIC.
5598 * Return value:
5599 * void
5600 */
5601 static void s2io_ethtool_getpause_data(struct net_device *dev,
5602 struct ethtool_pauseparam *ep)
5603 {
5604 u64 val64;
5605 struct s2io_nic *sp = netdev_priv(dev);
5606 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5607
5608 val64 = readq(&bar0->rmac_pause_cfg);
5609 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5610 ep->tx_pause = true;
5611 if (val64 & RMAC_PAUSE_RX_ENABLE)
5612 ep->rx_pause = true;
5613 ep->autoneg = false;
5614 }
5615
5616 /**
5617 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5618 * @sp : private member of the device structure, which is a pointer to the
5619 * s2io_nic structure.
5620 * @ep : pointer to the structure with pause parameters given by ethtool.
5621 * Description:
5622 * It can be used to set or reset Pause frame generation or reception
5623 * support of the NIC.
5624 * Return value:
5625 * int, returns 0 on Success
5626 */
5627
5628 static int s2io_ethtool_setpause_data(struct net_device *dev,
5629 struct ethtool_pauseparam *ep)
5630 {
5631 u64 val64;
5632 struct s2io_nic *sp = netdev_priv(dev);
5633 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5634
5635 val64 = readq(&bar0->rmac_pause_cfg);
5636 if (ep->tx_pause)
5637 val64 |= RMAC_PAUSE_GEN_ENABLE;
5638 else
5639 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5640 if (ep->rx_pause)
5641 val64 |= RMAC_PAUSE_RX_ENABLE;
5642 else
5643 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5644 writeq(val64, &bar0->rmac_pause_cfg);
5645 return 0;
5646 }
5647
5648 /**
5649 * read_eeprom - reads 4 bytes of data from user given offset.
5650 * @sp : private member of the device structure, which is a pointer to the
5651 * s2io_nic structure.
5652 * @off : offset from which the data is to be read
5653 * @data : Its an output parameter where the data read at the given
5654 * offset is stored.
5655 * Description:
5656 * Will read 4 bytes of data from the user given offset and return the
5657 * read data.
5658 * NOTE: Only the part of the EEPROM that is visible through the
5659 * I2C bus can be read.
5660 * Return value:
5661 * -1 on failure and 0 on success.
5662 */
5663
5664 #define S2IO_DEV_ID 5
5665 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5666 {
5667 int ret = -1;
5668 u32 exit_cnt = 0;
5669 u64 val64;
5670 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5671
5672 if (sp->device_type == XFRAME_I_DEVICE) {
5673 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5674 I2C_CONTROL_ADDR(off) |
5675 I2C_CONTROL_BYTE_CNT(0x3) |
5676 I2C_CONTROL_READ |
5677 I2C_CONTROL_CNTL_START;
5678 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5679
5680 while (exit_cnt < 5) {
5681 val64 = readq(&bar0->i2c_control);
5682 if (I2C_CONTROL_CNTL_END(val64)) {
5683 *data = I2C_CONTROL_GET_DATA(val64);
5684 ret = 0;
5685 break;
5686 }
5687 msleep(50);
5688 exit_cnt++;
5689 }
5690 }
5691
5692 if (sp->device_type == XFRAME_II_DEVICE) {
5693 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5694 SPI_CONTROL_BYTECNT(0x3) |
5695 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5696 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5697 val64 |= SPI_CONTROL_REQ;
5698 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5699 while (exit_cnt < 5) {
5700 val64 = readq(&bar0->spi_control);
5701 if (val64 & SPI_CONTROL_NACK) {
5702 ret = 1;
5703 break;
5704 } else if (val64 & SPI_CONTROL_DONE) {
5705 *data = readq(&bar0->spi_data);
5706 *data &= 0xffffff;
5707 ret = 0;
5708 break;
5709 }
5710 msleep(50);
5711 exit_cnt++;
5712 }
5713 }
5714 return ret;
5715 }
5716
5717 /**
5718 * write_eeprom - actually writes the relevant part of the data value.
5719 * @sp : private member of the device structure, which is a pointer to the
5720 * s2io_nic structure.
5721 * @off : offset at which the data must be written
5722 * @data : The data that is to be written
5723 * @cnt : Number of bytes of the data that are actually to be written into
5724 * the Eeprom. (max of 3)
5725 * Description:
5726 * Actually writes the relevant part of the data value into the Eeprom
5727 * through the I2C bus.
5728 * Return value:
5729 * 0 on success, -1 on failure.
5730 */
5731
5732 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5733 {
5734 int exit_cnt = 0, ret = -1;
5735 u64 val64;
5736 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5737
5738 if (sp->device_type == XFRAME_I_DEVICE) {
5739 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5740 I2C_CONTROL_ADDR(off) |
5741 I2C_CONTROL_BYTE_CNT(cnt) |
5742 I2C_CONTROL_SET_DATA((u32)data) |
5743 I2C_CONTROL_CNTL_START;
5744 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5745
5746 while (exit_cnt < 5) {
5747 val64 = readq(&bar0->i2c_control);
5748 if (I2C_CONTROL_CNTL_END(val64)) {
5749 if (!(val64 & I2C_CONTROL_NACK))
5750 ret = 0;
5751 break;
5752 }
5753 msleep(50);
5754 exit_cnt++;
5755 }
5756 }
5757
5758 if (sp->device_type == XFRAME_II_DEVICE) {
5759 int write_cnt = (cnt == 8) ? 0 : cnt;
5760 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5761
5762 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5763 SPI_CONTROL_BYTECNT(write_cnt) |
5764 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5765 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5766 val64 |= SPI_CONTROL_REQ;
5767 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5768 while (exit_cnt < 5) {
5769 val64 = readq(&bar0->spi_control);
5770 if (val64 & SPI_CONTROL_NACK) {
5771 ret = 1;
5772 break;
5773 } else if (val64 & SPI_CONTROL_DONE) {
5774 ret = 0;
5775 break;
5776 }
5777 msleep(50);
5778 exit_cnt++;
5779 }
5780 }
5781 return ret;
5782 }
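
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): a
 * verified 3-byte EEPROM write built from the two helpers above, mirroring
 * what s2io_eeprom_test() does at offsets 0x4F0 and 0x7F0. 'val' is
 * assumed to fit in the 3 bytes being written.
 */
static int __maybe_unused s2io_eeprom_verified_write(struct s2io_nic *sp,
						     int off, u64 val)
{
	u64 readback;

	if (write_eeprom(sp, off, val, 3))
		return -EIO;
	if (read_eeprom(sp, off, &readback))
		return -EIO;
	return (readback == val) ? 0 : -EIO;
}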
5783 static void s2io_vpd_read(struct s2io_nic *nic)
5784 {
5785 u8 *vpd_data;
5786 u8 data;
5787 int i = 0, cnt, len, fail = 0;
5788 int vpd_addr = 0x80;
5789 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5790
5791 if (nic->device_type == XFRAME_II_DEVICE) {
5792 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5793 vpd_addr = 0x80;
5794 } else {
5795 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5796 vpd_addr = 0x50;
5797 }
5798 strcpy(nic->serial_num, "NOT AVAILABLE");
5799
5800 vpd_data = kmalloc(256, GFP_KERNEL);
5801 if (!vpd_data) {
5802 swstats->mem_alloc_fail_cnt++;
5803 return;
5804 }
5805 swstats->mem_allocated += 256;
5806
5807 for (i = 0; i < 256; i += 4) {
5808 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5809 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5810 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5811 for (cnt = 0; cnt < 5; cnt++) {
5812 msleep(2);
5813 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5814 if (data == 0x80)
5815 break;
5816 }
5817 if (cnt >= 5) {
5818 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5819 fail = 1;
5820 break;
5821 }
5822 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5823 (u32 *)&vpd_data[i]);
5824 }
5825
5826 if (!fail) {
5827 /* read serial number of adapter */
5828 for (cnt = 0; cnt < 252; cnt++) {
5829 if ((vpd_data[cnt] == 'S') &&
5830 (vpd_data[cnt+1] == 'N')) {
5831 len = vpd_data[cnt+2];
5832 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5833 memcpy(nic->serial_num,
5834 &vpd_data[cnt + 3],
5835 len);
5836 memset(nic->serial_num+len,
5837 0,
5838 VPD_STRING_LEN-len);
5839 break;
5840 }
5841 }
5842 }
5843 }
5844
5845 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5846 len = vpd_data[1];
5847 memcpy(nic->product_name, &vpd_data[3], len);
5848 nic->product_name[len] = 0;
5849 }
5850 kfree(vpd_data);
5851 swstats->mem_freed += 256;
5852 }
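
/*
 * Note on the loop above: it drives the standard PCI VPD capability
 * directly. The target address is written to the VPD address register at
 * vpd_addr + 2 with the completion flag byte at vpd_addr + 3 cleared;
 * the hardware sets that flag (0x80) once the 32-bit datum can be read
 * from the VPD data register at vpd_addr + 4.
 */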
5853
5854 /**
5855 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5856 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5857 * @eeprom : pointer to the user level structure provided by ethtool,
5858 * containing all relevant information.
5859 * @data_buf : user supplied buffer into which the EEPROM contents are read.
5860 * Description: Reads the values stored in the Eeprom at given offset
5861 * for a given length. Stores these values in the output argument data
5862 * buffer 'data_buf' and returns them to the caller (ethtool).
5863 * Return value:
5864 * int 0 on success
5865 */
5866
5867 static int s2io_ethtool_geeprom(struct net_device *dev,
5868 struct ethtool_eeprom *eeprom, u8 * data_buf)
5869 {
5870 u32 i, valid;
5871 u64 data;
5872 struct s2io_nic *sp = netdev_priv(dev);
5873
5874 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5875
5876 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5877 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5878
5879 for (i = 0; i < eeprom->len; i += 4) {
5880 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5881 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5882 return -EFAULT;
5883 }
5884 valid = INV(data);
5885 memcpy((data_buf + i), &valid, 4);
5886 }
5887 return 0;
5888 }
5889
5890 /**
5891 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5892 * @sp : private member of the device structure, which is a pointer to the
5893 * s2io_nic structure.
5894 * @eeprom : pointer to the user level structure provided by ethtool,
5895 * containing all relevant information.
5896 * @data_buf : user defined value to be written into the EEPROM.
5897 * Description:
5898 * Tries to write the user provided value in the Eeprom, at the offset
5899 * given by the user.
5900 * Return value:
5901 * 0 on success, -EFAULT on failure.
5902 */
5903
5904 static int s2io_ethtool_seeprom(struct net_device *dev,
5905 struct ethtool_eeprom *eeprom,
5906 u8 *data_buf)
5907 {
5908 int len = eeprom->len, cnt = 0;
5909 u64 valid = 0, data;
5910 struct s2io_nic *sp = netdev_priv(dev);
5911
5912 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5913 DBG_PRINT(ERR_DBG,
5914 "ETHTOOL_WRITE_EEPROM Err: "
5915 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5916 eeprom->magic,
5917 (sp->pdev->vendor | (sp->pdev->device << 16)));
5918 return -EFAULT;
5919 }
5920
5921 while (len) {
5922 data = (u32)data_buf[cnt] & 0x000000FF;
5923 if (data)
5924 valid = (u32)(data << 24);
5925 else
5926 valid = data;
5927
5928 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5929 DBG_PRINT(ERR_DBG,
5930 "ETHTOOL_WRITE_EEPROM Err: "
5931 "Cannot write into the specified offset\n");
5932 return -EFAULT;
5933 }
5934 cnt++;
5935 len--;
5936 }
5937
5938 return 0;
5939 }
5940
5941 /**
5942 * s2io_register_test - reads and writes into all clock domains.
5943 * @sp : private member of the device structure, which is a pointer to the
5944 * s2io_nic structure.
5945 * @data : variable that returns the result of each of the tests conducted
5946 * by the driver.
5947 * Description:
5948 * Read and write into all clock domains. The NIC has 3 clock domains;
5949 * verify that registers in all three regions are accessible.
5950 * Return value:
5951 * 0 on success.
5952 */
5953
5954 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5955 {
5956 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5957 u64 val64 = 0, exp_val;
5958 int fail = 0;
5959
5960 val64 = readq(&bar0->pif_rd_swapper_fb);
5961 if (val64 != 0x123456789abcdefULL) {
5962 fail = 1;
5963 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5964 }
5965
5966 val64 = readq(&bar0->rmac_pause_cfg);
5967 if (val64 != 0xc000ffff00000000ULL) {
5968 fail = 1;
5969 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5970 }
5971
5972 val64 = readq(&bar0->rx_queue_cfg);
5973 if (sp->device_type == XFRAME_II_DEVICE)
5974 exp_val = 0x0404040404040404ULL;
5975 else
5976 exp_val = 0x0808080808080808ULL;
5977 if (val64 != exp_val) {
5978 fail = 1;
5979 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5980 }
5981
5982 val64 = readq(&bar0->xgxs_efifo_cfg);
5983 if (val64 != 0x000000001923141EULL) {
5984 fail = 1;
5985 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5986 }
5987
5988 val64 = 0x5A5A5A5A5A5A5A5AULL;
5989 writeq(val64, &bar0->xmsi_data);
5990 val64 = readq(&bar0->xmsi_data);
5991 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5992 fail = 1;
5993 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5994 }
5995
5996 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5997 writeq(val64, &bar0->xmsi_data);
5998 val64 = readq(&bar0->xmsi_data);
5999 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
6000 fail = 1;
6001 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
6002 }
6003
6004 *data = fail;
6005 return fail;
6006 }
6007
6008 /**
6009 * s2io_eeprom_test - verifies that the EEPROM in the Xena can be programmed.
6010 * @sp : private member of the device structure, which is a pointer to the
6011 * s2io_nic structure.
6012 * @data : variable that returns the result of each of the tests conducted by
6013 * the driver.
6014 * Description:
6015 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
6016 * register.
6017 * Return value:
6018 * 0 on success.
6019 */
6020
6021 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
6022 {
6023 int fail = 0;
6024 u64 ret_data, org_4F0, org_7F0;
6025 u8 saved_4F0 = 0, saved_7F0 = 0;
6026 struct net_device *dev = sp->dev;
6027
6028 /* Test Write Error at offset 0 */
6029 /* Note that SPI interface allows write access to all areas
6030 * of EEPROM. Hence doing all negative testing only for Xframe I.
6031 */
6032 if (sp->device_type == XFRAME_I_DEVICE)
6033 if (!write_eeprom(sp, 0, 0, 3))
6034 fail = 1;
6035
6036 /* Save current values at offsets 0x4F0 and 0x7F0 */
6037 if (!read_eeprom(sp, 0x4F0, &org_4F0))
6038 saved_4F0 = 1;
6039 if (!read_eeprom(sp, 0x7F0, &org_7F0))
6040 saved_7F0 = 1;
6041
6042 /* Test Write at offset 4f0 */
6043 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
6044 fail = 1;
6045 if (read_eeprom(sp, 0x4F0, &ret_data))
6046 fail = 1;
6047
6048 if (ret_data != 0x012345) {
6049 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
6050 "Data written %llx Data read %llx\n",
6051 dev->name, (unsigned long long)0x12345,
6052 (unsigned long long)ret_data);
6053 fail = 1;
6054 }
6055
6056 /* Reset the EEPROM data to 0xFFFFFF */
6057 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6058
6059 /* Test Write Request Error at offset 0x7c */
6060 if (sp->device_type == XFRAME_I_DEVICE)
6061 if (!write_eeprom(sp, 0x07C, 0, 3))
6062 fail = 1;
6063
6064 /* Test Write Request at offset 0x7f0 */
6065 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6066 fail = 1;
6067 if (read_eeprom(sp, 0x7F0, &ret_data))
6068 fail = 1;
6069
6070 if (ret_data != 0x012345) {
6071 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6072 "Data written %llx Data read %llx\n",
6073 dev->name, (unsigned long long)0x12345,
6074 (unsigned long long)ret_data);
6075 fail = 1;
6076 }
6077
6078 /* Reset the EEPROM data to 0xFFFFFF */
6079 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6080
6081 if (sp->device_type == XFRAME_I_DEVICE) {
6082 /* Test Write Error at offset 0x80 */
6083 if (!write_eeprom(sp, 0x080, 0, 3))
6084 fail = 1;
6085
6086 /* Test Write Error at offset 0xfc */
6087 if (!write_eeprom(sp, 0x0FC, 0, 3))
6088 fail = 1;
6089
6090 /* Test Write Error at offset 0x100 */
6091 if (!write_eeprom(sp, 0x100, 0, 3))
6092 fail = 1;
6093
6094 /* Test Write Error at offset 4ec */
6095 if (!write_eeprom(sp, 0x4EC, 0, 3))
6096 fail = 1;
6097 }
6098
6099 /* Restore values at offsets 0x4F0 and 0x7F0 */
6100 if (saved_4F0)
6101 write_eeprom(sp, 0x4F0, org_4F0, 3);
6102 if (saved_7F0)
6103 write_eeprom(sp, 0x7F0, org_7F0, 3);
6104
6105 *data = fail;
6106 return fail;
6107 }
6108
6109 /**
6110 * s2io_bist_test - invokes the MemBist test of the card.
6111 * @sp : private member of the device structure, which is a pointer to the
6112 * s2io_nic structure.
6113 * @data : variable that returns the result of each of the tests conducted by
6114 * the driver.
6115 * Description:
6116 * This invokes the MemBist test of the card. We give around
6117 * 2 secs for the test to complete. If it is still not complete
6118 * within this period, we consider that the test failed.
6119 * Return value:
6120 * 0 on success and -1 on failure.
6121 */
6122
6123 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6124 {
6125 u8 bist = 0;
6126 int cnt = 0, ret = -1;
6127
6128 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6129 bist |= PCI_BIST_START;
6130 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6131
6132 while (cnt < 20) {
6133 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6134 if (!(bist & PCI_BIST_START)) {
6135 *data = (bist & PCI_BIST_CODE_MASK);
6136 ret = 0;
6137 break;
6138 }
6139 msleep(100);
6140 cnt++;
6141 }
6142
6143 return ret;
6144 }
6145
6146 /**
6147 * s2io_link_test - verifies the link state of the nic
6148 * @sp : private member of the device structure, which is a pointer to the
6149 * s2io_nic structure.
6150 * @data : variable that returns the result of each of the tests conducted by
6151 * the driver.
6152 * Description:
6153 * The function verifies the link state of the NIC and updates the input
6154 * argument 'data' appropriately.
6155 * Return value:
6156 * 0 on success.
6157 */
6158
6159 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6160 {
6161 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6162 u64 val64;
6163
6164 val64 = readq(&bar0->adapter_status);
6165 if (!(LINK_IS_UP(val64)))
6166 *data = 1;
6167 else
6168 *data = 0;
6169
6170 return *data;
6171 }
6172
6173 /**
6174 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6175 * @sp : private member of the device structure, which is a pointer to the
6176 * s2io_nic structure.
6177 * @data : variable that returns the result of each of the tests
6178 * conducted by the driver.
6179 * Description:
6180 * This is one of the offline tests; it checks the read and write
6181 * access to the RldRam chip on the NIC.
6182 * Return value:
6183 * 0 on success.
6184 */
6185
6186 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6187 {
6188 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6189 u64 val64;
6190 int cnt, iteration = 0, test_fail = 0;
6191
6192 val64 = readq(&bar0->adapter_control);
6193 val64 &= ~ADAPTER_ECC_EN;
6194 writeq(val64, &bar0->adapter_control);
6195
6196 val64 = readq(&bar0->mc_rldram_test_ctrl);
6197 val64 |= MC_RLDRAM_TEST_MODE;
6198 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6199
6200 val64 = readq(&bar0->mc_rldram_mrs);
6201 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6202 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6203
6204 val64 |= MC_RLDRAM_MRS_ENABLE;
6205 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6206
6207 while (iteration < 2) {
6208 val64 = 0x55555555aaaa0000ULL;
6209 if (iteration == 1)
6210 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6211 writeq(val64, &bar0->mc_rldram_test_d0);
6212
6213 val64 = 0xaaaa5a5555550000ULL;
6214 if (iteration == 1)
6215 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6216 writeq(val64, &bar0->mc_rldram_test_d1);
6217
6218 val64 = 0x55aaaaaaaa5a0000ULL;
6219 if (iteration == 1)
6220 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6221 writeq(val64, &bar0->mc_rldram_test_d2);
6222
6223 val64 = 0x0000003ffffe0100ULL;
6224 writeq(val64, &bar0->mc_rldram_test_add);
6225
6226 val64 = MC_RLDRAM_TEST_MODE |
6227 MC_RLDRAM_TEST_WRITE |
6228 MC_RLDRAM_TEST_GO;
6229 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6230
6231 for (cnt = 0; cnt < 5; cnt++) {
6232 val64 = readq(&bar0->mc_rldram_test_ctrl);
6233 if (val64 & MC_RLDRAM_TEST_DONE)
6234 break;
6235 msleep(200);
6236 }
6237
6238 if (cnt == 5)
6239 break;
6240
6241 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6242 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6243
6244 for (cnt = 0; cnt < 5; cnt++) {
6245 val64 = readq(&bar0->mc_rldram_test_ctrl);
6246 if (val64 & MC_RLDRAM_TEST_DONE)
6247 break;
6248 msleep(500);
6249 }
6250
6251 if (cnt == 5)
6252 break;
6253
6254 val64 = readq(&bar0->mc_rldram_test_ctrl);
6255 if (!(val64 & MC_RLDRAM_TEST_PASS))
6256 test_fail = 1;
6257
6258 iteration++;
6259 }
6260
6261 *data = test_fail;
6262
6263 /* Bring the adapter out of test mode */
6264 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6265
6266 return test_fail;
6267 }
6268
6269 /**
6270 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
6271 * @sp : private member of the device structure, which is a pointer to the
6272 * s2io_nic structure.
6273 * @ethtest : pointer to a ethtool command specific structure that will be
6274 * returned to the user.
6275 * @data : variable that returns the result of each of the test
6276 * conducted by the driver.
6277 * Description:
6278 * This function conducts 5 tests (4 offline and 1 online) to determine
6279 * the health of the card.
6280 * Return value:
6281 * void
6282 */
6283
6284 static void s2io_ethtool_test(struct net_device *dev,
6285 struct ethtool_test *ethtest,
6286 uint64_t *data)
6287 {
6288 struct s2io_nic *sp = netdev_priv(dev);
6289 int orig_state = netif_running(sp->dev);
6290
6291 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6292 /* Offline Tests. */
6293 if (orig_state)
6294 s2io_close(sp->dev);
6295
6296 if (s2io_register_test(sp, &data[0]))
6297 ethtest->flags |= ETH_TEST_FL_FAILED;
6298
6299 s2io_reset(sp);
6300
6301 if (s2io_rldram_test(sp, &data[3]))
6302 ethtest->flags |= ETH_TEST_FL_FAILED;
6303
6304 s2io_reset(sp);
6305
6306 if (s2io_eeprom_test(sp, &data[1]))
6307 ethtest->flags |= ETH_TEST_FL_FAILED;
6308
6309 if (s2io_bist_test(sp, &data[4]))
6310 ethtest->flags |= ETH_TEST_FL_FAILED;
6311
6312 if (orig_state)
6313 s2io_open(sp->dev);
6314
6315 data[2] = 0;
6316 } else {
6317 /* Online Tests. */
6318 if (!orig_state) {
6319 DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6320 dev->name);
6321 data[0] = -1;
6322 data[1] = -1;
6323 data[2] = -1;
6324 data[3] = -1;
6325 data[4] = -1;
6326 }
6327
6328 if (s2io_link_test(sp, &data[2]))
6329 ethtest->flags |= ETH_TEST_FL_FAILED;
6330
6331 data[0] = 0;
6332 data[1] = 0;
6333 data[3] = 0;
6334 data[4] = 0;
6335 }
6336 }
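
/*
 * Illustrative note: "ethtool -t ethX offline" sets ETH_TEST_FL_OFFLINE
 * and lands here; the interface is closed, the four offline tests run
 * with a reset after the destructive ones, and the interface is reopened.
 * "ethtool -t ethX online" runs only the link test and zeroes the
 * remaining result slots.
 */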
6337
6338 static void s2io_get_ethtool_stats(struct net_device *dev,
6339 struct ethtool_stats *estats,
6340 u64 *tmp_stats)
6341 {
6342 int i = 0, k;
6343 struct s2io_nic *sp = netdev_priv(dev);
6344 struct stat_block *stats = sp->mac_control.stats_info;
6345 struct swStat *swstats = &stats->sw_stat;
6346 struct xpakStat *xstats = &stats->xpak_stat;
6347
6348 s2io_updt_stats(sp);
6349 tmp_stats[i++] =
6350 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6351 le32_to_cpu(stats->tmac_frms);
6352 tmp_stats[i++] =
6353 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6354 le32_to_cpu(stats->tmac_data_octets);
6355 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6356 tmp_stats[i++] =
6357 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6358 le32_to_cpu(stats->tmac_mcst_frms);
6359 tmp_stats[i++] =
6360 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6361 le32_to_cpu(stats->tmac_bcst_frms);
6362 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6363 tmp_stats[i++] =
6364 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6365 le32_to_cpu(stats->tmac_ttl_octets);
6366 tmp_stats[i++] =
6367 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6368 le32_to_cpu(stats->tmac_ucst_frms);
6369 tmp_stats[i++] =
6370 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6371 le32_to_cpu(stats->tmac_nucst_frms);
6372 tmp_stats[i++] =
6373 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6374 le32_to_cpu(stats->tmac_any_err_frms);
6375 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6376 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6377 tmp_stats[i++] =
6378 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6379 le32_to_cpu(stats->tmac_vld_ip);
6380 tmp_stats[i++] =
6381 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6382 le32_to_cpu(stats->tmac_drop_ip);
6383 tmp_stats[i++] =
6384 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6385 le32_to_cpu(stats->tmac_icmp);
6386 tmp_stats[i++] =
6387 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6388 le32_to_cpu(stats->tmac_rst_tcp);
6389 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6390 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6391 le32_to_cpu(stats->tmac_udp);
6392 tmp_stats[i++] =
6393 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6394 le32_to_cpu(stats->rmac_vld_frms);
6395 tmp_stats[i++] =
6396 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6397 le32_to_cpu(stats->rmac_data_octets);
6398 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6399 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6400 tmp_stats[i++] =
6401 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6402 le32_to_cpu(stats->rmac_vld_mcst_frms);
6403 tmp_stats[i++] =
6404 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6405 le32_to_cpu(stats->rmac_vld_bcst_frms);
6406 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6407 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6408 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6409 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6410 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6411 tmp_stats[i++] =
6412 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6413 le32_to_cpu(stats->rmac_ttl_octets);
6414 tmp_stats[i++] =
6415 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6416 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
6417 tmp_stats[i++] =
6418 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6419 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6420 tmp_stats[i++] =
6421 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6422 le32_to_cpu(stats->rmac_discarded_frms);
6423 tmp_stats[i++] =
6424 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6425 << 32 | le32_to_cpu(stats->rmac_drop_events);
6426 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6427 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6428 tmp_stats[i++] =
6429 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6430 le32_to_cpu(stats->rmac_usized_frms);
6431 tmp_stats[i++] =
6432 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6433 le32_to_cpu(stats->rmac_osized_frms);
6434 tmp_stats[i++] =
6435 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6436 le32_to_cpu(stats->rmac_frag_frms);
6437 tmp_stats[i++] =
6438 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6439 le32_to_cpu(stats->rmac_jabber_frms);
6440 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6441 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6442 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6443 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6444 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6445 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6446 tmp_stats[i++] =
6447 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6448 le32_to_cpu(stats->rmac_ip);
6449 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6450 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6451 tmp_stats[i++] =
6452 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6453 le32_to_cpu(stats->rmac_drop_ip);
6454 tmp_stats[i++] =
6455 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6456 le32_to_cpu(stats->rmac_icmp);
6457 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6458 tmp_stats[i++] =
6459 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6460 le32_to_cpu(stats->rmac_udp);
6461 tmp_stats[i++] =
6462 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6463 le32_to_cpu(stats->rmac_err_drp_udp);
6464 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6465 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6466 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6467 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6468 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6469 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6470 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6471 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6472 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6473 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6474 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6475 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6476 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6477 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6478 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6479 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6480 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6481 tmp_stats[i++] =
6482 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6483 le32_to_cpu(stats->rmac_pause_cnt);
6484 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6485 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6486 tmp_stats[i++] =
6487 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6488 le32_to_cpu(stats->rmac_accepted_ip);
6489 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6490 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6491 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6492 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6493 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6494 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6495 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6496 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6497 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6498 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6499 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6500 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6501 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6502 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6503 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6504 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6505 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6506 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6507 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6508
6509 /* Enhanced statistics exist only for Hercules */
6510 if (sp->device_type == XFRAME_II_DEVICE) {
6511 tmp_stats[i++] =
6512 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6513 tmp_stats[i++] =
6514 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6515 tmp_stats[i++] =
6516 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6517 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6518 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6519 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6520 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6521 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6522 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6523 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6524 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6525 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6526 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6527 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6528 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6529 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6530 }
6531
6532 tmp_stats[i++] = 0;
6533 tmp_stats[i++] = swstats->single_ecc_errs;
6534 tmp_stats[i++] = swstats->double_ecc_errs;
6535 tmp_stats[i++] = swstats->parity_err_cnt;
6536 tmp_stats[i++] = swstats->serious_err_cnt;
6537 tmp_stats[i++] = swstats->soft_reset_cnt;
6538 tmp_stats[i++] = swstats->fifo_full_cnt;
6539 for (k = 0; k < MAX_RX_RINGS; k++)
6540 tmp_stats[i++] = swstats->ring_full_cnt[k];
6541 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6542 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6543 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6544 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6545 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6546 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6547 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6548 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6549 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6550 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6551 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6552 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6553 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6554 tmp_stats[i++] = swstats->sending_both;
6555 tmp_stats[i++] = swstats->outof_sequence_pkts;
6556 tmp_stats[i++] = swstats->flush_max_pkts;
6557 if (swstats->num_aggregations) {
6558 u64 tmp = swstats->sum_avg_pkts_aggregated;
6559 int count = 0;
6560 /*
6561 * Since 64-bit divide does not work on all platforms,
6562 * do repeated subtraction.
6563 */
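/*
 * Note: the same average could instead be computed with the kernel's
 * do_div() helper, which implements u64-by-u32 division on all
 * architectures, e.g.:
 *
 *	u64 avg = swstats->sum_avg_pkts_aggregated;
 *	do_div(avg, swstats->num_aggregations);
 */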
6564 while (tmp >= swstats->num_aggregations) {
6565 tmp -= swstats->num_aggregations;
6566 count++;
6567 }
6568 tmp_stats[i++] = count;
6569 } else
6570 tmp_stats[i++] = 0;
6571 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6572 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6573 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6574 tmp_stats[i++] = swstats->mem_allocated;
6575 tmp_stats[i++] = swstats->mem_freed;
6576 tmp_stats[i++] = swstats->link_up_cnt;
6577 tmp_stats[i++] = swstats->link_down_cnt;
6578 tmp_stats[i++] = swstats->link_up_time;
6579 tmp_stats[i++] = swstats->link_down_time;
6580
6581 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6582 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6583 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6584 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6585 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6586
6587 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6588 tmp_stats[i++] = swstats->rx_abort_cnt;
6589 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6590 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6591 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6592 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6593 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6594 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6595 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6596 tmp_stats[i++] = swstats->tda_err_cnt;
6597 tmp_stats[i++] = swstats->pfc_err_cnt;
6598 tmp_stats[i++] = swstats->pcc_err_cnt;
6599 tmp_stats[i++] = swstats->tti_err_cnt;
6600 tmp_stats[i++] = swstats->tpa_err_cnt;
6601 tmp_stats[i++] = swstats->sm_err_cnt;
6602 tmp_stats[i++] = swstats->lso_err_cnt;
6603 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6604 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6605 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6606 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6607 tmp_stats[i++] = swstats->rc_err_cnt;
6608 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6609 tmp_stats[i++] = swstats->rpa_err_cnt;
6610 tmp_stats[i++] = swstats->rda_err_cnt;
6611 tmp_stats[i++] = swstats->rti_err_cnt;
6612 tmp_stats[i++] = swstats->mc_err_cnt;
6613 }
6614
6615 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6616 {
6617 return XENA_REG_SPACE;
6618 }
6619
6620
6621 static int s2io_get_eeprom_len(struct net_device *dev)
6622 {
6623 return XENA_EEPROM_SPACE;
6624 }
6625
6626 static int s2io_get_sset_count(struct net_device *dev, int sset)
6627 {
6628 struct s2io_nic *sp = netdev_priv(dev);
6629
6630 switch (sset) {
6631 case ETH_SS_TEST:
6632 return S2IO_TEST_LEN;
6633 case ETH_SS_STATS:
6634 switch (sp->device_type) {
6635 case XFRAME_I_DEVICE:
6636 return XFRAME_I_STAT_LEN;
6637 case XFRAME_II_DEVICE:
6638 return XFRAME_II_STAT_LEN;
6639 default:
6640 return 0;
6641 }
6642 default:
6643 return -EOPNOTSUPP;
6644 }
6645 }
6646
6647 static void s2io_ethtool_get_strings(struct net_device *dev,
6648 u32 stringset, u8 *data)
6649 {
6650 int stat_size = 0;
6651 struct s2io_nic *sp = netdev_priv(dev);
6652
6653 switch (stringset) {
6654 case ETH_SS_TEST:
6655 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6656 break;
6657 case ETH_SS_STATS:
6658 stat_size = sizeof(ethtool_xena_stats_keys);
6659 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6660 if (sp->device_type == XFRAME_II_DEVICE) {
6661 memcpy(data + stat_size,
6662 &ethtool_enhanced_stats_keys,
6663 sizeof(ethtool_enhanced_stats_keys));
6664 stat_size += sizeof(ethtool_enhanced_stats_keys);
6665 }
6666
6667 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6668 sizeof(ethtool_driver_stats_keys));
6669 }
6670 }
6671
6672 static int s2io_set_features(struct net_device *dev, u32 features)
6673 {
6674 struct s2io_nic *sp = netdev_priv(dev);
6675 u32 changed = (features ^ dev->features) & NETIF_F_LRO;
6676
6677 if (changed && netif_running(dev)) {
6678 int rc;
6679
6680 s2io_stop_all_tx_queue(sp);
6681 s2io_card_down(sp);
6682 dev->features = features;
6683 rc = s2io_card_up(sp);
6684 if (rc)
6685 s2io_reset(sp);
6686 else
6687 s2io_start_all_tx_queue(sp);
6688
6689 return rc ? rc : 1;
6690 }
6691
6692 return 0;
6693 }
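
/*
 * Usage note: with the hw_features conversion, "ethtool -K ethX lro on|off"
 * reaches the ndo_set_features hook above; the card is brought down and
 * back up only when NETIF_F_LRO actually changes while the interface is
 * running.
 */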
6694
6695 static const struct ethtool_ops netdev_ethtool_ops = {
6696 .get_settings = s2io_ethtool_gset,
6697 .set_settings = s2io_ethtool_sset,
6698 .get_drvinfo = s2io_ethtool_gdrvinfo,
6699 .get_regs_len = s2io_ethtool_get_regs_len,
6700 .get_regs = s2io_ethtool_gregs,
6701 .get_link = ethtool_op_get_link,
6702 .get_eeprom_len = s2io_get_eeprom_len,
6703 .get_eeprom = s2io_ethtool_geeprom,
6704 .set_eeprom = s2io_ethtool_seeprom,
6705 .get_ringparam = s2io_ethtool_gringparam,
6706 .get_pauseparam = s2io_ethtool_getpause_data,
6707 .set_pauseparam = s2io_ethtool_setpause_data,
6708 .self_test = s2io_ethtool_test,
6709 .get_strings = s2io_ethtool_get_strings,
6710 .set_phys_id = s2io_ethtool_set_led,
6711 .get_ethtool_stats = s2io_get_ethtool_stats,
6712 .get_sset_count = s2io_get_sset_count,
6713 };
6714
6715 /**
6716 * s2io_ioctl - Entry point for the Ioctl
6717 * @dev : Device pointer.
6718 * @ifr : An IOCTL specific structure that can contain a pointer to
6719 * a proprietary structure used to pass information to the driver.
6720 * @cmd : This is used to distinguish between the different commands that
6721 * can be passed to the IOCTL functions.
6722 * Description:
6723 * Currently no special functionality is supported in IOCTL, hence the
6724 * function always returns -EOPNOTSUPP.
6725 */
6726
6727 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6728 {
6729 return -EOPNOTSUPP;
6730 }
6731
6732 /**
6733 * s2io_change_mtu - entry point to change MTU size for the device.
6734 * @dev : device pointer.
6735 * @new_mtu : the new MTU size for the device.
6736 * Description: A driver entry point to change MTU size for the device.
6737 * Before changing the MTU the device must be stopped.
6738 * Return value:
6739 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6740 * file on failure.
6741 */
6742
6743 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6744 {
6745 struct s2io_nic *sp = netdev_priv(dev);
6746 int ret = 0;
6747
6748 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6749 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6750 return -EPERM;
6751 }
6752
6753 dev->mtu = new_mtu;
6754 if (netif_running(dev)) {
6755 s2io_stop_all_tx_queue(sp);
6756 s2io_card_down(sp);
6757 ret = s2io_card_up(sp);
6758 if (ret) {
6759 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6760 __func__);
6761 return ret;
6762 }
6763 s2io_wake_all_tx_queue(sp);
6764 } else { /* Device is down */
6765 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6766 u64 val64 = new_mtu;
6767
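/* vBIT(val64, 2, 14) places the new MTU in the 14-bit
 * payload-length field of the register.
 */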
6768 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6769 }
6770
6771 return ret;
6772 }
6773
6774 /**
6775 * s2io_set_link - Set the Link status
6776 * @data: long pointer to device private structure
6777 * Description: Sets the link status for the adapter
6778 */
6779
6780 static void s2io_set_link(struct work_struct *work)
6781 {
6782 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6783 set_link_task);
6784 struct net_device *dev = nic->dev;
6785 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6786 register u64 val64;
6787 u16 subid;
6788
6789 rtnl_lock();
6790
6791 if (!netif_running(dev))
6792 goto out_unlock;
6793
6794 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6795 /* The card is being reset, no point doing anything */
6796 goto out_unlock;
6797 }
6798
6799 subid = nic->pdev->subsystem_device;
6800 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6801 /*
6802 * Allow a small delay for the NICs self initiated
6803 * cleanup to complete.
6804 */
6805 msleep(100);
6806 }
6807
6808 val64 = readq(&bar0->adapter_status);
6809 if (LINK_IS_UP(val64)) {
6810 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6811 if (verify_xena_quiescence(nic)) {
6812 val64 = readq(&bar0->adapter_control);
6813 val64 |= ADAPTER_CNTL_EN;
6814 writeq(val64, &bar0->adapter_control);
6815 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6816 nic->device_type, subid)) {
6817 val64 = readq(&bar0->gpio_control);
6818 val64 |= GPIO_CTRL_GPIO_0;
6819 writeq(val64, &bar0->gpio_control);
6820 val64 = readq(&bar0->gpio_control);
6821 } else {
6822 val64 |= ADAPTER_LED_ON;
6823 writeq(val64, &bar0->adapter_control);
6824 }
6825 nic->device_enabled_once = true;
6826 } else {
6827 DBG_PRINT(ERR_DBG,
6828 "%s: Error: device is not Quiescent\n",
6829 dev->name);
6830 s2io_stop_all_tx_queue(nic);
6831 }
6832 }
6833 val64 = readq(&bar0->adapter_control);
6834 val64 |= ADAPTER_LED_ON;
6835 writeq(val64, &bar0->adapter_control);
6836 s2io_link(nic, LINK_UP);
6837 } else {
6838 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6839 subid)) {
6840 val64 = readq(&bar0->gpio_control);
6841 val64 &= ~GPIO_CTRL_GPIO_0;
6842 writeq(val64, &bar0->gpio_control);
6843 val64 = readq(&bar0->gpio_control);
6844 }
6845 /* turn off LED */
6846 val64 = readq(&bar0->adapter_control);
6847 val64 &= ~ADAPTER_LED_ON;
6848 writeq(val64, &bar0->adapter_control);
6849 s2io_link(nic, LINK_DOWN);
6850 }
6851 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6852
6853 out_unlock:
6854 rtnl_unlock();
6855 }
6856
6857 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6858 struct buffAdd *ba,
6859 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6860 u64 *temp2, int size)
6861 {
6862 struct net_device *dev = sp->dev;
6863 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6864
6865 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6866 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6867 /* allocate skb */
6868 if (*skb) {
6869 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6870 /*
6871 * As Rx frames are not going to be processed,
6872 * reuse the same mapped address for the RxD
6873 * buffer pointer
6874 */
6875 rxdp1->Buffer0_ptr = *temp0;
6876 } else {
6877 *skb = dev_alloc_skb(size);
6878 if (!(*skb)) {
6879 DBG_PRINT(INFO_DBG,
6880 "%s: Out of memory to allocate %s\n",
6881 dev->name, "1 buf mode SKBs");
6882 stats->mem_alloc_fail_cnt++;
6883 return -ENOMEM ;
6884 }
6885 stats->mem_allocated += (*skb)->truesize;
6886 /* store the mapped addr in a temp variable
6887 * so that it can be used for the next rxd whose
6888 * Host_Control is NULL
6889 */
6890 rxdp1->Buffer0_ptr = *temp0 =
6891 pci_map_single(sp->pdev, (*skb)->data,
6892 size - NET_IP_ALIGN,
6893 PCI_DMA_FROMDEVICE);
6894 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6895 goto memalloc_failed;
6896 rxdp->Host_Control = (unsigned long) (*skb);
6897 }
6898 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6899 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6900 /* Two buffer Mode */
6901 if (*skb) {
6902 rxdp3->Buffer2_ptr = *temp2;
6903 rxdp3->Buffer0_ptr = *temp0;
6904 rxdp3->Buffer1_ptr = *temp1;
6905 } else {
6906 *skb = dev_alloc_skb(size);
6907 if (!(*skb)) {
6908 DBG_PRINT(INFO_DBG,
6909 "%s: Out of memory to allocate %s\n",
6910 dev->name,
6911 "2 buf mode SKBs");
6912 stats->mem_alloc_fail_cnt++;
6913 return -ENOMEM;
6914 }
6915 stats->mem_allocated += (*skb)->truesize;
6916 rxdp3->Buffer2_ptr = *temp2 =
6917 pci_map_single(sp->pdev, (*skb)->data,
6918 dev->mtu + 4,
6919 PCI_DMA_FROMDEVICE);
6920 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6921 goto memalloc_failed;
6922 rxdp3->Buffer0_ptr = *temp0 =
6923 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6924 PCI_DMA_FROMDEVICE);
6925 if (pci_dma_mapping_error(sp->pdev,
6926 rxdp3->Buffer0_ptr)) {
6927 pci_unmap_single(sp->pdev,
6928 (dma_addr_t)rxdp3->Buffer2_ptr,
6929 dev->mtu + 4,
6930 PCI_DMA_FROMDEVICE);
6931 goto memalloc_failed;
6932 }
6933 rxdp->Host_Control = (unsigned long) (*skb);
6934
6935 /* Buffer-1 is a dummy buffer, not used */
6936 rxdp3->Buffer1_ptr = *temp1 =
6937 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6938 PCI_DMA_FROMDEVICE);
6939 if (pci_dma_mapping_error(sp->pdev,
6940 rxdp3->Buffer1_ptr)) {
6941 pci_unmap_single(sp->pdev,
6942 (dma_addr_t)rxdp3->Buffer0_ptr,
6943 BUF0_LEN, PCI_DMA_FROMDEVICE);
6944 pci_unmap_single(sp->pdev,
6945 (dma_addr_t)rxdp3->Buffer2_ptr,
6946 dev->mtu + 4,
6947 PCI_DMA_FROMDEVICE);
6948 goto memalloc_failed;
6949 }
6950 }
6951 }
6952 return 0;
6953
6954 memalloc_failed:
6955 stats->pci_map_fail_cnt++;
6956 stats->mem_freed += (*skb)->truesize;
6957 dev_kfree_skb(*skb);
6958 return -ENOMEM;
6959 }
6960
6961 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6962 int size)
6963 {
6964 struct net_device *dev = sp->dev;
6965 if (sp->rxd_mode == RXD_MODE_1) {
6966 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6967 } else if (sp->rxd_mode == RXD_MODE_3B) {
6968 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6969 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6970 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6971 }
6972 }
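
/*
 * In 2-buffer (RXD_MODE_3B) mode the three pointers set up in
 * set_rxd_buffer_pointer() carry: Buffer0 the header area (BUF0_LEN
 * bytes), Buffer1 a one-byte dummy, and Buffer2 the payload of up to
 * dev->mtu + 4 bytes, matching the sizes programmed above.
 */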
6973
6974 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6975 {
6976 int i, j, k, blk_cnt = 0, size;
6977 struct config_param *config = &sp->config;
6978 struct mac_info *mac_control = &sp->mac_control;
6979 struct net_device *dev = sp->dev;
6980 struct RxD_t *rxdp = NULL;
6981 struct sk_buff *skb = NULL;
6982 struct buffAdd *ba = NULL;
6983 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6984
6985 /* Calculate the size based on ring mode */
6986 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6987 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6988 if (sp->rxd_mode == RXD_MODE_1)
6989 size += NET_IP_ALIGN;
6990 else if (sp->rxd_mode == RXD_MODE_3B)
6991 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6992
6993 for (i = 0; i < config->rx_ring_num; i++) {
6994 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6995 struct ring_info *ring = &mac_control->rings[i];
6996
6997 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6998
6999 for (j = 0; j < blk_cnt; j++) {
7000 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
7001 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
7002 if (sp->rxd_mode == RXD_MODE_3B)
7003 ba = &ring->ba[j][k];
7004 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
7005 (u64 *)&temp0_64,
7006 (u64 *)&temp1_64,
7007 (u64 *)&temp2_64,
7008 size) == -ENOMEM) {
7009 return 0;
7010 }
7011
7012 set_rxd_buffer_size(sp, rxdp, size);
7013 wmb();
7014 /* flip the Ownership bit to Hardware */
7015 rxdp->Control_1 |= RXD_OWN_XENA;
7016 }
7017 }
7018 }
7019 return 0;
7021 }
7022
7023 static int s2io_add_isr(struct s2io_nic *sp)
7024 {
7025 int ret = 0;
7026 struct net_device *dev = sp->dev;
7027 int err = 0;
7028
7029 if (sp->config.intr_type == MSI_X)
7030 ret = s2io_enable_msi_x(sp);
7031 if (ret) {
7032 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
7033 sp->config.intr_type = INTA;
7034 }
7035
7036 /*
7037 * Store the values of the MSIX table in
7038 * the struct s2io_nic structure
7039 */
7040 store_xmsi_data(sp);
7041
7042 /* After proper initialization of H/W, register ISR */
7043 if (sp->config.intr_type == MSI_X) {
7044 int i, msix_rx_cnt = 0;
7045
7046 for (i = 0; i < sp->num_entries; i++) {
7047 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7048 if (sp->s2io_entries[i].type ==
7049 MSIX_RING_TYPE) {
7050 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7051 dev->name, i);
7052 err = request_irq(sp->entries[i].vector,
7053 s2io_msix_ring_handle,
7054 0,
7055 sp->desc[i],
7056 sp->s2io_entries[i].arg);
7057 } else if (sp->s2io_entries[i].type ==
7058 MSIX_ALARM_TYPE) {
7059 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7060 dev->name, i);
7061 err = request_irq(sp->entries[i].vector,
7062 s2io_msix_fifo_handle,
7063 0,
7064 sp->desc[i],
7065 sp->s2io_entries[i].arg);
7066
7067 }
7068 /* if either data or addr is zero print it. */
7069 if (!(sp->msix_info[i].addr &&
7070 sp->msix_info[i].data)) {
7071 DBG_PRINT(ERR_DBG,
7072 "%s @Addr:0x%llx Data:0x%llx\n",
7073 sp->desc[i],
7074 (unsigned long long)
7075 sp->msix_info[i].addr,
7076 (unsigned long long)
7077 ntohl(sp->msix_info[i].data));
7078 } else
7079 msix_rx_cnt++;
7080 if (err) {
7081 remove_msix_isr(sp);
7082
7083 DBG_PRINT(ERR_DBG,
7084 "%s:MSI-X-%d registration "
7085 "failed\n", dev->name, i);
7086
7087 DBG_PRINT(ERR_DBG,
7088 "%s: Defaulting to INTA\n",
7089 dev->name);
7090 sp->config.intr_type = INTA;
7091 break;
7092 }
7093 sp->s2io_entries[i].in_use =
7094 MSIX_REGISTERED_SUCCESS;
7095 }
7096 }
7097 if (!err) {
7098 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7099 DBG_PRINT(INFO_DBG,
7100 "MSI-X-TX entries enabled through alarm vector\n");
7101 }
7102 }
7103 if (sp->config.intr_type == INTA) {
7104 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
7105 sp->name, dev);
7106 if (err) {
7107 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7108 dev->name);
7109 return -1;
7110 }
7111 }
7112 return 0;
7113 }
7114
7115 static void s2io_rem_isr(struct s2io_nic *sp)
7116 {
7117 if (sp->config.intr_type == MSI_X)
7118 remove_msix_isr(sp);
7119 else
7120 remove_inta_isr(sp);
7121 }
7122
7123 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7124 {
7125 int cnt = 0;
7126 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7127 register u64 val64 = 0;
7128 struct config_param *config;
7129 config = &sp->config;
7130
7131 if (!is_s2io_card_up(sp))
7132 return;
7133
7134 del_timer_sync(&sp->alarm_timer);
7135 /* If s2io_set_link task is executing, wait till it completes. */
7136 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7137 msleep(50);
7138 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7139
7140 /* Disable napi */
7141 if (sp->config.napi) {
7142 int off = 0;
7143 if (config->intr_type == MSI_X) {
7144 for (; off < sp->config.rx_ring_num; off++)
7145 napi_disable(&sp->mac_control.rings[off].napi);
7146 } else {
7147 napi_disable(&sp->napi);
7148 }
7149 }
7150
7151 /* disable Tx and Rx traffic on the NIC */
7152 if (do_io)
7153 stop_nic(sp);
7154
7155 s2io_rem_isr(sp);
7156
7157 /* stop the tx queue, indicate link down */
7158 s2io_link(sp, LINK_DOWN);
7159
7160 /* Check if the device is Quiescent and then Reset the NIC */
7161 while (do_io) {
7162 /* As per the HW requirement we need to replenish the
7163 * receive buffers to avoid the ring bump. Since there is
7164 * no intention of processing the Rx frame at this point, we are
7165 * just setting the ownership bit of each rxd in every Rx
7166 * ring to HW and setting the appropriate buffer size
7167 * based on the ring mode.
7168 */
7169 rxd_owner_bit_reset(sp);
7170
7171 val64 = readq(&bar0->adapter_status);
7172 if (verify_xena_quiescence(sp)) {
7173 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7174 break;
7175 }
7176
7177 msleep(50);
7178 cnt++;
7179 if (cnt == 10) {
7180 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7181 "adapter status reads 0x%llx\n",
7182 (unsigned long long)val64);
7183 break;
7184 }
7185 }
7186 if (do_io)
7187 s2io_reset(sp);
7188
7189 /* Free all Tx buffers */
7190 free_tx_buffers(sp);
7191
7192 /* Free all Rx buffers */
7193 free_rx_buffers(sp);
7194
7195 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7196 }
7197
7198 static void s2io_card_down(struct s2io_nic *sp)
7199 {
7200 do_s2io_card_down(sp, 1);
7201 }
7202
7203 static int s2io_card_up(struct s2io_nic *sp)
7204 {
7205 int i, ret = 0;
7206 struct config_param *config;
7207 struct mac_info *mac_control;
7208 struct net_device *dev = sp->dev;
7209 u16 interruptible;
7210
7211 /* Initialize the H/W I/O registers */
7212 ret = init_nic(sp);
7213 if (ret != 0) {
7214 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7215 dev->name);
7216 if (ret != -EIO)
7217 s2io_reset(sp);
7218 return ret;
7219 }
7220
7221 /*
7222 * Initializing the Rx buffers. For now we are considering only 1
7223 * Rx ring and initializing buffers into 30 Rx blocks
7224 */
7225 config = &sp->config;
7226 mac_control = &sp->mac_control;
7227
7228 for (i = 0; i < config->rx_ring_num; i++) {
7229 struct ring_info *ring = &mac_control->rings[i];
7230
7231 ring->mtu = dev->mtu;
7232 ring->lro = !!(dev->features & NETIF_F_LRO);
7233 ret = fill_rx_buffers(sp, ring, 1);
7234 if (ret) {
7235 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7236 dev->name);
7237 s2io_reset(sp);
7238 free_rx_buffers(sp);
7239 return -ENOMEM;
7240 }
7241 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7242 ring->rx_bufs_left);
7243 }
7244
7245 /* Initialise napi */
7246 if (config->napi) {
7247 if (config->intr_type == MSI_X) {
7248 for (i = 0; i < sp->config.rx_ring_num; i++)
7249 napi_enable(&sp->mac_control.rings[i].napi);
7250 } else {
7251 napi_enable(&sp->napi);
7252 }
7253 }
7254
7255 /* Maintain the state prior to the open */
7256 if (sp->promisc_flg)
7257 sp->promisc_flg = 0;
7258 if (sp->m_cast_flg) {
7259 sp->m_cast_flg = 0;
7260 sp->all_multi_pos = 0;
7261 }
7262
7263 /* Setting its receive mode */
7264 s2io_set_multicast(dev);
7265
7266 if (dev->features & NETIF_F_LRO) {
7267 /* Initialize max aggregatable pkts per session based on MTU */
7268 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7269 /* Honor a smaller user-provided lro_max_pkts limit, if any */
7270 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7271 sp->lro_max_aggr_per_sess = lro_max_pkts;
7272 }
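/*
 * Worked example for the cap above: with a standard 1500-byte MTU,
 * lro_max_aggr_per_sess = ((1 << 16) - 1) / 1500 = 65535 / 1500 = 43
 * frames per session, which keeps the aggregated ip->tot_len within
 * its 16-bit limit.
 */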
7273
7274 /* Enable Rx Traffic and interrupts on the NIC */
7275 if (start_nic(sp)) {
7276 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7277 s2io_reset(sp);
7278 free_rx_buffers(sp);
7279 return -ENODEV;
7280 }
7281
7282 /* Add interrupt service routine */
7283 if (s2io_add_isr(sp) != 0) {
7284 if (sp->config.intr_type == MSI_X)
7285 s2io_rem_isr(sp);
7286 s2io_reset(sp);
7287 free_rx_buffers(sp);
7288 return -ENODEV;
7289 }
7290
7291 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7292
7293 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7294
7295 /* Enable select interrupts */
7296 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
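/*
 * Note the interrupt split below: with MSI-X, Rx traffic interrupts
 * arrive on the per-ring vectors, so only the Tx traffic and PIC
 * alarm interrupts are enabled here; with INTA the Tx, Rx and PIC
 * interrupts all funnel through the single shared line.
 */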
7297 if (sp->config.intr_type != INTA) {
7298 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7299 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7300 } else {
7301 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7302 interruptible |= TX_PIC_INTR;
7303 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7304 }
7305
7306 return 0;
7307 }
7308
7309 /**
7310 * s2io_restart_nic - Resets the NIC.
7311 * @work : the work_struct embedded in the device private structure
7312 * Description:
7313 * This function is scheduled to be run by the s2io_tx_watchdog
7314 * function after 0.5 secs to reset the NIC. The idea is to reduce
7315 * the run time of the watch dog routine which is run holding a
7316 * spin lock.
7317 */
7318
7319 static void s2io_restart_nic(struct work_struct *work)
7320 {
7321 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7322 struct net_device *dev = sp->dev;
7323
7324 rtnl_lock();
7325
7326 if (!netif_running(dev))
7327 goto out_unlock;
7328
7329 s2io_card_down(sp);
7330 if (s2io_card_up(sp)) {
7331 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7332 }
7333 s2io_wake_all_tx_queue(sp);
7334 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7335 out_unlock:
7336 rtnl_unlock();
7337 }
7338
7339 /**
7340 * s2io_tx_watchdog - Watchdog for transmit side.
7341 * @dev : Pointer to net device structure
7342 * Description:
7343 * This function is triggered if the Tx Queue is stopped
7344 * for a pre-defined amount of time when the Interface is still up.
7345 * If the Interface is jammed in such a situation, the hardware is
7346 * reset and restarted (by the scheduled s2io_restart_nic work item) to
7347 * overcome any problem that might have been caused in the hardware.
7348 * Return value:
7349 * void
7350 */
7351
7352 static void s2io_tx_watchdog(struct net_device *dev)
7353 {
7354 struct s2io_nic *sp = netdev_priv(dev);
7355 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7356
7357 if (netif_carrier_ok(dev)) {
7358 swstats->watchdog_timer_cnt++;
7359 schedule_work(&sp->rst_timer_task);
7360 swstats->soft_reset_cnt++;
7361 }
7362 }
7363
7364 /**
7365 * rx_osm_handler - To perform some OS related operations on SKB.
7366 * @ring_data : per-ring private data; the NIC, the net device and the
7367 * SKB for this descriptor are all derived from it.
7368 * @rxdp : the receive descriptor (RxD) for this packet.
7369 * Description:
7370 * This function is called by the Rx interrupt service routine to perform
7371 * some OS related operations on the SKB before passing it to the upper
7372 * layers. It mainly checks if the checksum is OK; if so it marks the
7373 * SKB's ip_summed field accordingly, increments the Rx packet count and
7374 * passes the SKB to the upper layer. If the transfer code indicates an
7375 * error, it increments the Rx error counters, frees the SKB and returns.
7378 * Return value:
7379 * SUCCESS on success; 0 after dropping a frame with a bad transfer code.
7380 */
7381 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7382 {
7383 struct s2io_nic *sp = ring_data->nic;
7384 struct net_device *dev = (struct net_device *)ring_data->dev;
7385 struct sk_buff *skb = (struct sk_buff *)
7386 ((unsigned long)rxdp->Host_Control);
7387 int ring_no = ring_data->ring_no;
7388 u16 l3_csum, l4_csum;
7389 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7390 struct lro *uninitialized_var(lro);
7391 u8 err_mask;
7392 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7393
7394 skb->dev = dev;
7395
7396 if (err) {
7397 /* Check for parity error */
7398 if (err & 0x1)
7399 swstats->parity_err_cnt++;
7400
7401 err_mask = err >> 48;
7402 switch (err_mask) {
7403 case 1:
7404 swstats->rx_parity_err_cnt++;
7405 break;
7406
7407 case 2:
7408 swstats->rx_abort_cnt++;
7409 break;
7410
7411 case 3:
7412 swstats->rx_parity_abort_cnt++;
7413 break;
7414
7415 case 4:
7416 swstats->rx_rda_fail_cnt++;
7417 break;
7418
7419 case 5:
7420 swstats->rx_unkn_prot_cnt++;
7421 break;
7422
7423 case 6:
7424 swstats->rx_fcs_err_cnt++;
7425 break;
7426
7427 case 7:
7428 swstats->rx_buf_size_err_cnt++;
7429 break;
7430
7431 case 8:
7432 swstats->rx_rxd_corrupt_cnt++;
7433 break;
7434
7435 case 15:
7436 swstats->rx_unkn_err_cnt++;
7437 break;
7438 }
7439 /*
7440 * Drop the packet if bad transfer code. Exception being
7441 * 0x5, which could be due to unsupported IPv6 extension header.
7442 * In this case, we let stack handle the packet.
7443 * Note that since the checksum will be incorrect in this case,
7444 * the stack will re-validate it.
7445 */
7446 if (err_mask != 0x5) {
7447 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7448 dev->name, err_mask);
7449 dev->stats.rx_crc_errors++;
7450 swstats->mem_freed
7451 += skb->truesize;
7452 dev_kfree_skb(skb);
7453 ring_data->rx_bufs_left -= 1;
7454 rxdp->Host_Control = 0;
7455 return 0;
7456 }
7457 }
7458
7459 rxdp->Host_Control = 0;
7460 if (sp->rxd_mode == RXD_MODE_1) {
7461 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7462
7463 skb_put(skb, len);
7464 } else if (sp->rxd_mode == RXD_MODE_3B) {
7465 int get_block = ring_data->rx_curr_get_info.block_index;
7466 int get_off = ring_data->rx_curr_get_info.offset;
7467 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7468 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7469 unsigned char *buff = skb_push(skb, buf0_len);
7470
7471 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7472 memcpy(buff, ba->ba_0, buf0_len);
7473 skb_put(skb, buf2_len);
7474 }
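/*
 * In 2-buffer (RXD_MODE_3B) mode the descriptor splits the frame:
 * buffer0 holds the header bytes and buffer2 the payload. The header
 * is copied from the buffer-0 area into the skb headroom (skb_push),
 * while the payload, already DMA'd into the skb data area, is
 * accounted for with skb_put, leaving a linear skb.
 */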
7475
7476 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7477 ((!ring_data->lro) ||
7478 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7479 (dev->features & NETIF_F_RXCSUM)) {
7480 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7481 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7482 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7483 /*
7484 * NIC verifies if the Checksum of the received
7485 * frame is Ok or not and accordingly returns
7486 * a flag in the RxD.
7487 */
7488 skb->ip_summed = CHECKSUM_UNNECESSARY;
7489 if (ring_data->lro) {
7490 u32 tcp_len = 0;
7491 u8 *tcp;
7492 int ret = 0;
7493
7494 ret = s2io_club_tcp_session(ring_data,
7495 skb->data, &tcp,
7496 &tcp_len, &lro,
7497 rxdp, sp);
7498 switch (ret) {
7499 case 3: /* Begin anew */
7500 lro->parent = skb;
7501 goto aggregate;
7502 case 1: /* Aggregate */
7503 lro_append_pkt(sp, lro, skb, tcp_len);
7504 goto aggregate;
7505 case 4: /* Flush session */
7506 lro_append_pkt(sp, lro, skb, tcp_len);
7507 queue_rx_frame(lro->parent,
7508 lro->vlan_tag);
7509 clear_lro_session(lro);
7510 swstats->flush_max_pkts++;
7511 goto aggregate;
7512 case 2: /* Flush both */
7513 lro->parent->data_len = lro->frags_len;
7514 swstats->sending_both++;
7515 queue_rx_frame(lro->parent,
7516 lro->vlan_tag);
7517 clear_lro_session(lro);
7518 goto send_up;
7519 case 0: /* sessions exceeded */
7520 case -1: /* non-TCP or not L2 aggregatable */
7521 case 5: /*
7522 * First pkt in session not
7523 * L3/L4 aggregatable
7524 */
7525 break;
7526 default:
7527 DBG_PRINT(ERR_DBG,
7528 "%s: Samadhana!!\n",
7529 __func__);
7530 BUG();
7531 }
7532 }
7533 } else {
7534 /*
7535 * Packet with erroneous checksum, let the
7536 * upper layers deal with it.
7537 */
7538 skb_checksum_none_assert(skb);
7539 }
7540 } else
7541 skb_checksum_none_assert(skb);
7542
7543 swstats->mem_freed += skb->truesize;
7544 send_up:
7545 skb_record_rx_queue(skb, ring_no);
7546 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7547 aggregate:
7548 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7549 return SUCCESS;
7550 }
7551
7552 /**
7553 * s2io_link - stops/starts the Tx queue.
7554 * @sp : private member of the device structure, which is a pointer to the
7555 * s2io_nic structure.
7556 * @link : indicates whether link is UP/DOWN.
7557 * Description:
7558 * This function stops/starts the Tx queue depending on whether the link
7559 * status of the NIC is down or up. This is called by the Alarm
7560 * interrupt handler whenever a link change interrupt comes up.
7561 * Return value:
7562 * void.
7563 */
7564
7565 static void s2io_link(struct s2io_nic *sp, int link)
7566 {
7567 struct net_device *dev = (struct net_device *)sp->dev;
7568 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7569
7570 if (link != sp->last_link_state) {
7571 init_tti(sp, link);
7572 if (link == LINK_DOWN) {
7573 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7574 s2io_stop_all_tx_queue(sp);
7575 netif_carrier_off(dev);
7576 if (swstats->link_up_cnt)
7577 swstats->link_up_time =
7578 jiffies - sp->start_time;
7579 swstats->link_down_cnt++;
7580 } else {
7581 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7582 if (swstats->link_down_cnt)
7583 swstats->link_down_time =
7584 jiffies - sp->start_time;
7585 swstats->link_up_cnt++;
7586 netif_carrier_on(dev);
7587 s2io_wake_all_tx_queue(sp);
7588 }
7589 }
7590 sp->last_link_state = link;
7591 sp->start_time = jiffies;
7592 }
7593
7594 /**
7595 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7596 * @sp : private member of the device structure, which is a pointer to the
7597 * s2io_nic structure.
7598 * Description:
7599 * This function initializes a few of the PCI and PCI-X configuration registers
7600 * with recommended values.
7601 * Return value:
7602 * void
7603 */
7604
7605 static void s2io_init_pci(struct s2io_nic *sp)
7606 {
7607 u16 pci_cmd = 0, pcix_cmd = 0;
7608
7609 /* Enable Data Parity Error Recovery in PCI-X command register. */
7610 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7611 &(pcix_cmd));
7612 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7613 (pcix_cmd | 1));
7614 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7615 &(pcix_cmd));
7616
7617 /* Set the PErr Response bit in PCI command register. */
7618 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7619 pci_write_config_word(sp->pdev, PCI_COMMAND,
7620 (pci_cmd | PCI_COMMAND_PARITY));
7621 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7622 }
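/*
 * The config-space reads following each write above appear to act as
 * read-backs, making sure the PCI/PCI-X command updates have reached
 * the device before initialization continues.
 */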
7623
7624 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7625 u8 *dev_multiq)
7626 {
7627 int i;
7628
7629 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7630 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7631 "(%d) not supported\n", tx_fifo_num);
7632
7633 if (tx_fifo_num < 1)
7634 tx_fifo_num = 1;
7635 else
7636 tx_fifo_num = MAX_TX_FIFOS;
7637
7638 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7639 }
7640
7641 if (multiq)
7642 *dev_multiq = multiq;
7643
7644 if (tx_steering_type && (1 == tx_fifo_num)) {
7645 if (tx_steering_type != TX_DEFAULT_STEERING)
7646 DBG_PRINT(ERR_DBG,
7647 "Tx steering is not supported with "
7648 "one fifo. Disabling Tx steering.\n");
7649 tx_steering_type = NO_STEERING;
7650 }
7651
7652 if ((tx_steering_type < NO_STEERING) ||
7653 (tx_steering_type > TX_DEFAULT_STEERING)) {
7654 DBG_PRINT(ERR_DBG,
7655 "Requested transmit steering not supported\n");
7656 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7657 tx_steering_type = NO_STEERING;
7658 }
7659
7660 if (rx_ring_num > MAX_RX_RINGS) {
7661 DBG_PRINT(ERR_DBG,
7662 "Requested number of rx rings not supported\n");
7663 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7664 MAX_RX_RINGS);
7665 rx_ring_num = MAX_RX_RINGS;
7666 }
7667
7668 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7669 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7670 "Defaulting to INTA\n");
7671 *dev_intr_type = INTA;
7672 }
7673
7674 if ((*dev_intr_type == MSI_X) &&
7675 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7676 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7677 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7678 "Defaulting to INTA\n");
7679 *dev_intr_type = INTA;
7680 }
7681
7682 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7683 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7684 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7685 rx_ring_mode = 1;
7686 }
7687
7688 for (i = 0; i < MAX_RX_RINGS; i++)
7689 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7690 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7691 "supported\nDefaulting to %d\n",
7692 MAX_RX_BLOCKS_PER_RING);
7693 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7694 }
7695
7696 return SUCCESS;
7697 }
7698
7699 /**
7700 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7701 * or Traffic class respectively.
7702 * @nic: device private variable
7703 * Description: The function configures the receive steering to
7704 * desired receive ring.
7705 * Return Value: SUCCESS on success and
7706 * '-1' on failure (endian settings incorrect).
7707 */
7708 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7709 {
7710 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7711 register u64 val64 = 0;
7712
7713 if (ds_codepoint > 63)
7714 return FAILURE;
7715
7716 val64 = RTS_DS_MEM_DATA(ring);
7717 writeq(val64, &bar0->rts_ds_mem_data);
7718
7719 val64 = RTS_DS_MEM_CTRL_WE |
7720 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7721 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7722
7723 writeq(val64, &bar0->rts_ds_mem_ctrl);
7724
7725 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7726 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7727 S2IO_BIT_RESET);
7728 }
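/*
 * Usage sketch with hypothetical values: to steer traffic marked with
 * DSCP codepoint 46 (expedited forwarding) to ring 2, a caller would
 * issue rts_ds_steer(nic, 46, 2). Codepoints above 63 are rejected
 * because the DS field is only six bits wide.
 */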
7729
7730 static const struct net_device_ops s2io_netdev_ops = {
7731 .ndo_open = s2io_open,
7732 .ndo_stop = s2io_close,
7733 .ndo_get_stats = s2io_get_stats,
7734 .ndo_start_xmit = s2io_xmit,
7735 .ndo_validate_addr = eth_validate_addr,
7736 .ndo_set_multicast_list = s2io_set_multicast,
7737 .ndo_do_ioctl = s2io_ioctl,
7738 .ndo_set_mac_address = s2io_set_mac_addr,
7739 .ndo_change_mtu = s2io_change_mtu,
7740 .ndo_set_features = s2io_set_features,
7741 .ndo_vlan_rx_register = s2io_vlan_rx_register,
7742 .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
7743 .ndo_tx_timeout = s2io_tx_watchdog,
7744 #ifdef CONFIG_NET_POLL_CONTROLLER
7745 .ndo_poll_controller = s2io_netpoll,
7746 #endif
7747 };
7748
7749 /**
7750 * s2io_init_nic - Initialization of the adapter .
7751 * @pdev : structure containing the PCI related information of the device.
7752 * @pre: entry in the s2io_tbl pci_device_id table that matched this device.
7753 * Description:
7754 * The function initializes an adapter identified by the pci_dev structure.
7755 * All OS related initialization including memory and device structure and
7756 * initialization of the device private variable is done. Also the swapper
7757 * control register is initialized to enable read and write into the I/O
7758 * registers of the device.
7759 * Return value:
7760 * returns 0 on success and negative on failure.
7761 */
7762
7763 static int __devinit
7764 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7765 {
7766 struct s2io_nic *sp;
7767 struct net_device *dev;
7768 int i, j, ret;
7769 int dma_flag = false;
7770 u32 mac_up, mac_down;
7771 u64 val64 = 0, tmp64 = 0;
7772 struct XENA_dev_config __iomem *bar0 = NULL;
7773 u16 subid;
7774 struct config_param *config;
7775 struct mac_info *mac_control;
7776 int mode;
7777 u8 dev_intr_type = intr_type;
7778 u8 dev_multiq = 0;
7779
7780 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7781 if (ret)
7782 return ret;
7783
7784 ret = pci_enable_device(pdev);
7785 if (ret) {
7786 DBG_PRINT(ERR_DBG,
7787 "%s: pci_enable_device failed\n", __func__);
7788 return ret;
7789 }
7790
7791 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7792 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7793 dma_flag = true;
7794 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7795 DBG_PRINT(ERR_DBG,
7796 "Unable to obtain 64bit DMA "
7797 "for consistent allocations\n");
7798 pci_disable_device(pdev);
7799 return -ENOMEM;
7800 }
7801 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7802 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7803 } else {
7804 pci_disable_device(pdev);
7805 return -ENOMEM;
7806 }
7807 ret = pci_request_regions(pdev, s2io_driver_name);
7808 if (ret) {
7809 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7810 __func__, ret);
7811 pci_disable_device(pdev);
7812 return -ENODEV;
7813 }
7814 if (dev_multiq)
7815 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7816 else
7817 dev = alloc_etherdev(sizeof(struct s2io_nic));
7818 if (dev == NULL) {
7819 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7820 pci_disable_device(pdev);
7821 pci_release_regions(pdev);
7822 return -ENODEV;
7823 }
7824
7825 pci_set_master(pdev);
7826 pci_set_drvdata(pdev, dev);
7827 SET_NETDEV_DEV(dev, &pdev->dev);
7828
7829 /* Private member variable initialized to s2io NIC structure */
7830 sp = netdev_priv(dev);
7831 sp->dev = dev;
7832 sp->pdev = pdev;
7833 sp->high_dma_flag = dma_flag;
7834 sp->device_enabled_once = false;
7835 if (rx_ring_mode == 1)
7836 sp->rxd_mode = RXD_MODE_1;
7837 if (rx_ring_mode == 2)
7838 sp->rxd_mode = RXD_MODE_3B;
7839
7840 sp->config.intr_type = dev_intr_type;
7841
7842 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7843 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7844 sp->device_type = XFRAME_II_DEVICE;
7845 else
7846 sp->device_type = XFRAME_I_DEVICE;
7847
7848
7849 /* Initialize some PCI/PCI-X fields of the NIC. */
7850 s2io_init_pci(sp);
7851
7852 /*
7853 * Setting the device configuration parameters.
7854 * Most of these parameters can be specified by the user during
7855 * module insertion as they are module loadable parameters. If
7856 * these parameters are not specified during load time, they
7857 * are initialized with default values.
7858 */
7859 config = &sp->config;
7860 mac_control = &sp->mac_control;
7861
7862 config->napi = napi;
7863 config->tx_steering_type = tx_steering_type;
7864
7865 /* Tx side parameters. */
7866 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7867 config->tx_fifo_num = MAX_TX_FIFOS;
7868 else
7869 config->tx_fifo_num = tx_fifo_num;
7870
7871 /* Initialize the fifos used for tx steering */
7872 if (config->tx_fifo_num < 5) {
7873 if (config->tx_fifo_num == 1)
7874 sp->total_tcp_fifos = 1;
7875 else
7876 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7877 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7878 sp->total_udp_fifos = 1;
7879 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7880 } else {
7881 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7882 FIFO_OTHER_MAX_NUM);
7883 sp->udp_fifo_idx = sp->total_tcp_fifos;
7884 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7885 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7886 }
7887
7888 config->multiq = dev_multiq;
7889 for (i = 0; i < config->tx_fifo_num; i++) {
7890 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7891
7892 tx_cfg->fifo_len = tx_fifo_len[i];
7893 tx_cfg->fifo_priority = i;
7894 }
7895
7896 /* mapping the QoS priority to the configured fifos */
7897 for (i = 0; i < MAX_TX_FIFOS; i++)
7898 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7899
7900 /* map the hashing selector table to the configured fifos */
7901 for (i = 0; i < config->tx_fifo_num; i++)
7902 sp->fifo_selector[i] = fifo_selector[i];
7903
7904
7905 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7906 for (i = 0; i < config->tx_fifo_num; i++) {
7907 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7908
7909 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7910 if (tx_cfg->fifo_len < 65) {
7911 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7912 break;
7913 }
7914 }
7915 /* + 2 because one Txd for skb->data and one Txd for UFO */
7916 config->max_txds = MAX_SKB_FRAGS + 2;
7917
7918 /* Rx side parameters. */
7919 config->rx_ring_num = rx_ring_num;
7920 for (i = 0; i < config->rx_ring_num; i++) {
7921 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7922 struct ring_info *ring = &mac_control->rings[i];
7923
7924 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7925 rx_cfg->ring_priority = i;
7926 ring->rx_bufs_left = 0;
7927 ring->rxd_mode = sp->rxd_mode;
7928 ring->rxd_count = rxd_count[sp->rxd_mode];
7929 ring->pdev = sp->pdev;
7930 ring->dev = sp->dev;
7931 }
7932
7933 for (i = 0; i < rx_ring_num; i++) {
7934 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7935
7936 rx_cfg->ring_org = RING_ORG_BUFF1;
7937 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7938 }
7939
7940 /* Setting Mac Control parameters */
7941 mac_control->rmac_pause_time = rmac_pause_time;
7942 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7943 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7944
7945
7946 /* initialize the shared memory used by the NIC and the host */
7947 if (init_shared_mem(sp)) {
7948 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7949 ret = -ENOMEM;
7950 goto mem_alloc_failed;
7951 }
7952
7953 sp->bar0 = pci_ioremap_bar(pdev, 0);
7954 if (!sp->bar0) {
7955 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7956 dev->name);
7957 ret = -ENOMEM;
7958 goto bar0_remap_failed;
7959 }
7960
7961 sp->bar1 = pci_ioremap_bar(pdev, 2);
7962 if (!sp->bar1) {
7963 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7964 dev->name);
7965 ret = -ENOMEM;
7966 goto bar1_remap_failed;
7967 }
7968
7969 dev->irq = pdev->irq;
7970 dev->base_addr = (unsigned long)sp->bar0;
7971
7972 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7973 for (j = 0; j < MAX_TX_FIFOS; j++) {
7974 mac_control->tx_FIFO_start[j] =
7975 (struct TxFIFO_element __iomem *)
7976 (sp->bar1 + (j * 0x00020000));
7977 }
7978
7979 /* Driver entry points */
7980 dev->netdev_ops = &s2io_netdev_ops;
7981 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7982 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7983 NETIF_F_TSO | NETIF_F_TSO6 |
7984 NETIF_F_RXCSUM | NETIF_F_LRO;
7985 dev->features |= dev->hw_features |
7986 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7987 if (sp->device_type & XFRAME_II_DEVICE) {
7988 dev->hw_features |= NETIF_F_UFO;
7989 if (ufo)
7990 dev->features |= NETIF_F_UFO;
7991 }
7992 if (sp->high_dma_flag == true)
7993 dev->features |= NETIF_F_HIGHDMA;
7994 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7995 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7996 INIT_WORK(&sp->set_link_task, s2io_set_link);
7997
7998 pci_save_state(sp->pdev);
7999
8000 /* Setting swapper control on the NIC, for proper reset operation */
8001 if (s2io_set_swapper(sp)) {
8002 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
8003 dev->name);
8004 ret = -EAGAIN;
8005 goto set_swap_failed;
8006 }
8007
8008 /* Verify if the Herc works on the slot its placed into */
8009 if (sp->device_type & XFRAME_II_DEVICE) {
8010 mode = s2io_verify_pci_mode(sp);
8011 if (mode < 0) {
8012 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
8013 __func__);
8014 ret = -EBADSLT;
8015 goto set_swap_failed;
8016 }
8017 }
8018
8019 if (sp->config.intr_type == MSI_X) {
8020 sp->num_entries = config->rx_ring_num + 1;
8021 ret = s2io_enable_msi_x(sp);
8022
8023 if (!ret) {
8024 ret = s2io_test_msi(sp);
8025 /* rollback MSI-X, will re-enable during add_isr() */
8026 remove_msix_isr(sp);
8027 }
8028 if (ret) {
8029
8030 DBG_PRINT(ERR_DBG,
8031 "MSI-X requested but failed to enable\n");
8032 sp->config.intr_type = INTA;
8033 }
8034 }
8035
8036 if (config->intr_type == MSI_X) {
8037 for (i = 0; i < config->rx_ring_num ; i++) {
8038 struct ring_info *ring = &mac_control->rings[i];
8039
8040 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
8041 }
8042 } else {
8043 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8044 }
8045
8046 /* Not needed for Herc */
8047 if (sp->device_type & XFRAME_I_DEVICE) {
8048 /*
8049 * Fix for all "FFs" MAC address problems observed on
8050 * Alpha platforms
8051 */
8052 fix_mac_address(sp);
8053 s2io_reset(sp);
8054 }
8055
8056 /*
8057 * MAC address initialization.
8058 * For now only one mac address will be read and used.
8059 */
8060 bar0 = sp->bar0;
8061 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8062 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8063 writeq(val64, &bar0->rmac_addr_cmd_mem);
8064 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8065 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8066 S2IO_BIT_RESET);
8067 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8068 mac_down = (u32)tmp64;
8069 mac_up = (u32) (tmp64 >> 32);
8070
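/*
 * The factory MAC occupies the most-significant six bytes of the
 * 64-bit rmac_addr_data0_mem value; e.g. a register value of
 * 0x0011223344550000 yields the address 00:11:22:33:44:55. The
 * shifts below peel those bytes out of the mac_up/mac_down halves.
 */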
8071 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8072 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8073 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8074 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8075 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8076 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8077
8078 /* Set the factory defined MAC address initially */
8079 dev->addr_len = ETH_ALEN;
8080 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8081 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8082
8083 /* initialize number of multicast & unicast MAC entries variables */
8084 if (sp->device_type == XFRAME_I_DEVICE) {
8085 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8086 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8087 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8088 } else if (sp->device_type == XFRAME_II_DEVICE) {
8089 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8090 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8091 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8092 }
8093
8094 /* store mac addresses from CAM to s2io_nic structure */
8095 do_s2io_store_unicast_mc(sp);
8096
8097 /* Configure MSIX vector for number of rings configured plus one */
8098 if ((sp->device_type == XFRAME_II_DEVICE) &&
8099 (config->intr_type == MSI_X))
8100 sp->num_entries = config->rx_ring_num + 1;
8101
8102 /* Store the values of the MSIX table in the s2io_nic structure */
8103 store_xmsi_data(sp);
8104 /* reset Nic and bring it to known state */
8105 s2io_reset(sp);
8106
8107 /*
8108 * Initialize link state flags
8109 * and the card state parameter
8110 */
8111 sp->state = 0;
8112
8113 /* Initialize spinlocks */
8114 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8115 struct fifo_info *fifo = &mac_control->fifos[i];
8116
8117 spin_lock_init(&fifo->tx_lock);
8118 }
8119
8120 /*
8121 * SXE-002: Configure link and activity LED to init state
8122 * on driver load.
8123 */
8124 subid = sp->pdev->subsystem_device;
8125 if ((subid & 0xFF) >= 0x07) {
8126 val64 = readq(&bar0->gpio_control);
8127 val64 |= 0x0000800000000000ULL;
8128 writeq(val64, &bar0->gpio_control);
8129 val64 = 0x0411040400000000ULL;
8130 writeq(val64, (void __iomem *)bar0 + 0x2700);
8131 val64 = readq(&bar0->gpio_control);
8132 }
8133
8134 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8135
8136 if (register_netdev(dev)) {
8137 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8138 ret = -ENODEV;
8139 goto register_failed;
8140 }
8141 s2io_vpd_read(sp);
8142 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8143 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8144 sp->product_name, pdev->revision);
8145 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8146 s2io_driver_version);
8147 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8148 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8149 if (sp->device_type & XFRAME_II_DEVICE) {
8150 mode = s2io_print_pci_mode(sp);
8151 if (mode < 0) {
8152 ret = -EBADSLT;
8153 unregister_netdev(dev);
8154 goto set_swap_failed;
8155 }
8156 }
8157 switch (sp->rxd_mode) {
8158 case RXD_MODE_1:
8159 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8160 dev->name);
8161 break;
8162 case RXD_MODE_3B:
8163 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8164 dev->name);
8165 break;
8166 }
8167
8168 switch (sp->config.napi) {
8169 case 0:
8170 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8171 break;
8172 case 1:
8173 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8174 break;
8175 }
8176
8177 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8178 sp->config.tx_fifo_num);
8179
8180 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8181 sp->config.rx_ring_num);
8182
8183 switch (sp->config.intr_type) {
8184 case INTA:
8185 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8186 break;
8187 case MSI_X:
8188 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8189 break;
8190 }
8191 if (sp->config.multiq) {
8192 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8193 struct fifo_info *fifo = &mac_control->fifos[i];
8194
8195 fifo->multiq = config->multiq;
8196 }
8197 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8198 dev->name);
8199 } else
8200 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8201 dev->name);
8202
8203 switch (sp->config.tx_steering_type) {
8204 case NO_STEERING:
8205 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8206 dev->name);
8207 break;
8208 case TX_PRIORITY_STEERING:
8209 DBG_PRINT(ERR_DBG,
8210 "%s: Priority steering enabled for transmit\n",
8211 dev->name);
8212 break;
8213 case TX_DEFAULT_STEERING:
8214 DBG_PRINT(ERR_DBG,
8215 "%s: Default steering enabled for transmit\n",
8216 dev->name);
8217 }
8218
8219 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8220 dev->name);
8221 if (ufo)
8222 DBG_PRINT(ERR_DBG,
8223 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8224 dev->name);
8225 /* Initialize device name */
8226 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8227
8228 if (vlan_tag_strip)
8229 sp->vlan_strip_flag = 1;
8230 else
8231 sp->vlan_strip_flag = 0;
8232
8233 /*
8234 * Make Link state as off at this point, when the Link change
8235 * interrupt comes the state will be automatically changed to
8236 * the right state.
8237 */
8238 netif_carrier_off(dev);
8239
8240 return 0;
8241
8242 register_failed:
8243 set_swap_failed:
8244 iounmap(sp->bar1);
8245 bar1_remap_failed:
8246 iounmap(sp->bar0);
8247 bar0_remap_failed:
8248 mem_alloc_failed:
8249 free_shared_mem(sp);
8250 pci_disable_device(pdev);
8251 pci_release_regions(pdev);
8252 pci_set_drvdata(pdev, NULL);
8253 free_netdev(dev);
8254
8255 return ret;
8256 }
8257
8258 /**
8259 * s2io_rem_nic - Free the PCI device
8260 * @pdev: structure containing the PCI related information of the device.
8261 * Description: This function is called by the Pci subsystem to release a
8262 * PCI device and free up all resource held up by the device. This could
8263 * be in response to a Hot plug event or when the driver is to be removed
8264 * from memory.
8265 */
8266
8267 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8268 {
8269 struct net_device *dev = pci_get_drvdata(pdev);
8270 struct s2io_nic *sp;
8271
8272 if (dev == NULL) {
8273 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8274 return;
8275 }
8276
8277 sp = netdev_priv(dev);
8278
8279 cancel_work_sync(&sp->rst_timer_task);
8280 cancel_work_sync(&sp->set_link_task);
8281
8282 unregister_netdev(dev);
8283
8284 free_shared_mem(sp);
8285 iounmap(sp->bar0);
8286 iounmap(sp->bar1);
8287 pci_release_regions(pdev);
8288 pci_set_drvdata(pdev, NULL);
8289 free_netdev(dev);
8290 pci_disable_device(pdev);
8291 }
8292
8293 /**
8294 * s2io_starter - Entry point for the driver
8295 * Description: This function is the entry point for the driver. It verifies
8296 * the module loadable parameters and initializes PCI configuration space.
8297 */
8298
8299 static int __init s2io_starter(void)
8300 {
8301 return pci_register_driver(&s2io_driver);
8302 }
8303
8304 /**
8305 * s2io_closer - Cleanup routine for the driver
8306 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
8307 */
8308
8309 static __exit void s2io_closer(void)
8310 {
8311 pci_unregister_driver(&s2io_driver);
8312 DBG_PRINT(INIT_DBG, "cleanup done\n");
8313 }
8314
8315 module_init(s2io_starter);
8316 module_exit(s2io_closer);
8317
8318 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8319 struct tcphdr **tcp, struct RxD_t *rxdp,
8320 struct s2io_nic *sp)
8321 {
8322 int ip_off;
8323 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8324
8325 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8326 DBG_PRINT(INIT_DBG,
8327 "%s: Non-TCP frames not supported for LRO\n",
8328 __func__);
8329 return -1;
8330 }
8331
8332 /* Checking for DIX type or DIX type with VLAN */
8333 if ((l2_type == 0) || (l2_type == 4)) {
8334 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8335 /*
8336 * If vlan stripping is disabled and the frame is VLAN tagged,
8337 * shift the offset by the VLAN header size bytes.
8338 */
8339 if ((!sp->vlan_strip_flag) &&
8340 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8341 ip_off += HEADER_VLAN_SIZE;
8342 } else {
8343 /* LLC, SNAP etc are considered non-mergeable */
8344 return -1;
8345 }
8346
8347 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8348 ip_len = (u8)((*ip)->ihl);
8349 ip_len <<= 2;
8350 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8351
8352 return 0;
8353 }
8354
8355 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8356 struct tcphdr *tcp)
8357 {
8358 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8359 if ((lro->iph->saddr != ip->saddr) ||
8360 (lro->iph->daddr != ip->daddr) ||
8361 (lro->tcph->source != tcp->source) ||
8362 (lro->tcph->dest != tcp->dest))
8363 return -1;
8364 return 0;
8365 }
8366
8367 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8368 {
8369 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8370 }
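/*
 * Example: for a full-size frame with no IP or TCP options,
 * tot_len = 1500, ihl = 5 and doff = 5, so the TCP payload length is
 * 1500 - (5 << 2) - (5 << 2) = 1500 - 20 - 20 = 1460 bytes.
 */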
8371
8372 static void initiate_new_session(struct lro *lro, u8 *l2h,
8373 struct iphdr *ip, struct tcphdr *tcp,
8374 u32 tcp_pyld_len, u16 vlan_tag)
8375 {
8376 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8377 lro->l2h = l2h;
8378 lro->iph = ip;
8379 lro->tcph = tcp;
8380 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8381 lro->tcp_ack = tcp->ack_seq;
8382 lro->sg_num = 1;
8383 lro->total_len = ntohs(ip->tot_len);
8384 lro->frags_len = 0;
8385 lro->vlan_tag = vlan_tag;
8386 /*
8387 * Check if we saw TCP timestamp.
8388 * Other consistency checks have already been done.
8389 */
8390 if (tcp->doff == 8) {
8391 __be32 *ptr;
8392 ptr = (__be32 *)(tcp+1);
8393 lro->saw_ts = 1;
8394 lro->cur_tsval = ntohl(*(ptr+1));
8395 lro->cur_tsecr = *(ptr+2);
8396 }
8397 lro->in_use = 1;
8398 }
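/*
 * The ptr + 1 / ptr + 2 arithmetic above (and in update_L3L4_header()
 * and aggregate_new_rx() below) relies on the canonical timestamp
 * option layout admitted by verify_l3_l4_lro_capable() when
 * doff == 8: two NOP bytes, kind (8), length (10), then the 4-byte
 * tsval and 4-byte tsecr fields.
 */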
8399
8400 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8401 {
8402 struct iphdr *ip = lro->iph;
8403 struct tcphdr *tcp = lro->tcph;
8404 __sum16 nchk;
8405 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8406
8407 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8408
8409 /* Update L3 header */
8410 ip->tot_len = htons(lro->total_len);
8411 ip->check = 0;
8412 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8413 ip->check = nchk;
8414
8415 /* Update L4 header */
8416 tcp->ack_seq = lro->tcp_ack;
8417 tcp->window = lro->window;
8418
8419 /* Update tsecr field if this session has timestamps enabled */
8420 if (lro->saw_ts) {
8421 __be32 *ptr = (__be32 *)(tcp + 1);
8422 *(ptr+2) = lro->cur_tsecr;
8423 }
8424
8425 /* Update counters required for calculation of
8426 * average no. of packets aggregated.
8427 */
8428 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8429 swstats->num_aggregations++;
8430 }
8431
8432 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8433 struct tcphdr *tcp, u32 l4_pyld)
8434 {
8435 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8436 lro->total_len += l4_pyld;
8437 lro->frags_len += l4_pyld;
8438 lro->tcp_next_seq += l4_pyld;
8439 lro->sg_num++;
8440
8441 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8442 lro->tcp_ack = tcp->ack_seq;
8443 lro->window = tcp->window;
8444
8445 if (lro->saw_ts) {
8446 __be32 *ptr;
8447 /* Update tsecr and tsval from this packet */
8448 ptr = (__be32 *)(tcp+1);
8449 lro->cur_tsval = ntohl(*(ptr+1));
8450 lro->cur_tsecr = *(ptr + 2);
8451 }
8452 }
8453
8454 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8455 struct tcphdr *tcp, u32 tcp_pyld_len)
8456 {
8457 u8 *ptr;
8458
8459 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8460
8461 if (!tcp_pyld_len) {
8462 /* Runt frame or a pure ack */
8463 return -1;
8464 }
8465
8466 if (ip->ihl != 5) /* IP has options */
8467 return -1;
8468
8469 /* If we see CE codepoint in IP header, packet is not mergeable */
8470 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8471 return -1;
8472
8473 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8474 if (tcp->urg || tcp->psh || tcp->rst ||
8475 tcp->syn || tcp->fin ||
8476 tcp->ece || tcp->cwr || !tcp->ack) {
8477 /*
8478 * Currently only the ACK control bit is recognized; any
8479 * other control flag being set results in flushing the
8480 * LRO session.
8481 */
8482 return -1;
8483 }
8484
8485 /*
8486 * Allow only one TCP timestamp option. Don't aggregate if
8487 * any other options are detected.
8488 */
8489 if (tcp->doff != 5 && tcp->doff != 8)
8490 return -1;
8491
8492 if (tcp->doff == 8) {
8493 ptr = (u8 *)(tcp + 1);
8494 while (*ptr == TCPOPT_NOP)
8495 ptr++;
8496 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8497 return -1;
8498
8499 /* Ensure timestamp value increases monotonically */
8500 if (l_lro)
8501 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8502 return -1;
8503
8504 /* timestamp echo reply should be non-zero */
8505 if (*((__be32 *)(ptr+6)) == 0)
8506 return -1;
8507 }
8508
8509 return 0;
8510 }
8511
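/*
 * Return-code contract of s2io_club_tcp_session(), as consumed by the
 * switch in rx_osm_handler():
 *   -1  frame is not TCP or not L2 aggregatable - send up as is
 *    0  all LRO sessions already in use - send up as is
 *    1  matched an existing session - aggregate this segment
 *    2  out of sequence or not mergeable - flush session and frame
 *    3  no matching session and frame is aggregatable - begin anew
 *    4  session hit lro_max_aggr_per_sess - flush the session
 *    5  first packet of a would-be session is not L3/L4 aggregatable
 */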
8512 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8513 u8 **tcp, u32 *tcp_len, struct lro **lro,
8514 struct RxD_t *rxdp, struct s2io_nic *sp)
8515 {
8516 struct iphdr *ip;
8517 struct tcphdr *tcph;
8518 int ret = 0, i;
8519 u16 vlan_tag = 0;
8520 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8521
8522 ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8523 rxdp, sp);
8524 if (ret)
8525 return ret;
8526
8527 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8528
8529 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8530 tcph = (struct tcphdr *)*tcp;
8531 *tcp_len = get_l4_pyld_length(ip, tcph);
8532 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8533 struct lro *l_lro = &ring_data->lro0_n[i];
8534 if (l_lro->in_use) {
8535 if (check_for_socket_match(l_lro, ip, tcph))
8536 continue;
8537 /* Sock pair matched */
8538 *lro = l_lro;
8539
8540 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8541 DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8542 "expected 0x%x, actual 0x%x\n",
8543 __func__,
8544 (*lro)->tcp_next_seq,
8545 ntohl(tcph->seq));
8546
8547 swstats->outof_sequence_pkts++;
8548 ret = 2;
8549 break;
8550 }
8551
8552 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8553 *tcp_len))
8554 ret = 1; /* Aggregate */
8555 else
8556 ret = 2; /* Flush both */
8557 break;
8558 }
8559 }
8560
8561 if (ret == 0) {
8562 /* Before searching for available LRO objects,
8563 * check if the pkt is L3/L4 aggregatable. If not
8564 * don't create new LRO session. Just send this
8565 * packet up.
8566 */
8567 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8568 return 5;
8569
8570 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8571 struct lro *l_lro = &ring_data->lro0_n[i];
8572 if (!(l_lro->in_use)) {
8573 *lro = l_lro;
8574 ret = 3; /* Begin anew */
8575 break;
8576 }
8577 }
8578 }
8579
8580 if (ret == 0) { /* sessions exceeded */
8581 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8582 __func__);
8583 *lro = NULL;
8584 return ret;
8585 }
8586
8587 switch (ret) {
8588 case 3:
8589 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8590 vlan_tag);
8591 break;
8592 case 2:
8593 update_L3L4_header(sp, *lro);
8594 break;
8595 case 1:
8596 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8597 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8598 update_L3L4_header(sp, *lro);
8599 ret = 4; /* Flush the LRO */
8600 }
8601 break;
8602 default:
8603 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8604 break;
8605 }
8606
8607 return ret;
8608 }
8609
8610 static void clear_lro_session(struct lro *lro)
8611 {
8612 memset(lro, 0, sizeof(struct lro));
8615 }
8616
8617 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8618 {
8619 struct net_device *dev = skb->dev;
8620 struct s2io_nic *sp = netdev_priv(dev);
8621
8622 skb->protocol = eth_type_trans(skb, dev);
8623 if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
8624 /* Queueing the vlan frame to the upper layer */
8625 if (sp->config.napi)
8626 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8627 else
8628 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8629 } else {
8630 if (sp->config.napi)
8631 netif_receive_skb(skb);
8632 else
8633 netif_rx(skb);
8634 }
8635 }
8636
8637 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8638 struct sk_buff *skb, u32 tcp_len)
8639 {
8640 struct sk_buff *first = lro->parent;
8641 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8642
8643 first->len += tcp_len;
8644 first->data_len = lro->frags_len;
8645 skb_pull(skb, (skb->len - tcp_len));
8646 if (skb_shinfo(first)->frag_list)
8647 lro->last_frag->next = skb;
8648 else
8649 skb_shinfo(first)->frag_list = skb;
8650 first->truesize += skb->truesize;
8651 lro->last_frag = skb;
8652 swstats->clubbed_frms_cnt++;
8653 }
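/*
 * lro_append_pkt() grows the parent skb without copying payload:
 * skb_pull() strips everything but the tcp_len payload bytes from the
 * new segment, which is then chained onto the parent's frag_list while
 * len, data_len and truesize are updated to account for it.
 */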
8654
8655 /**
8656 * s2io_io_error_detected - called when PCI error is detected
8657 * @pdev: Pointer to PCI device
8658 * @state: The current pci connection state
8659 *
8660 * This function is called after a PCI bus error affecting
8661 * this device has been detected.
8662 */
8663 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8664 pci_channel_state_t state)
8665 {
8666 struct net_device *netdev = pci_get_drvdata(pdev);
8667 struct s2io_nic *sp = netdev_priv(netdev);
8668
8669 netif_device_detach(netdev);
8670
8671 if (state == pci_channel_io_perm_failure)
8672 return PCI_ERS_RESULT_DISCONNECT;
8673
8674 if (netif_running(netdev)) {
8675 /* Bring down the card, while avoiding PCI I/O */
8676 do_s2io_card_down(sp, 0);
8677 }
8678 pci_disable_device(pdev);
8679
8680 return PCI_ERS_RESULT_NEED_RESET;
8681 }
8682
8683 /**
8684 * s2io_io_slot_reset - called after the pci bus has been reset.
8685 * @pdev: Pointer to PCI device
8686 *
8687 * Restart the card from scratch, as if from a cold-boot.
8688 * At this point, the card has experienced a hard reset,
8689 * followed by fixups by BIOS, and has its config space
8690 * set up identically to what it was at cold boot.
8691 */
8692 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8693 {
8694 struct net_device *netdev = pci_get_drvdata(pdev);
8695 struct s2io_nic *sp = netdev_priv(netdev);
8696
8697 if (pci_enable_device(pdev)) {
8698 pr_err("Cannot re-enable PCI device after reset.\n");
8699 return PCI_ERS_RESULT_DISCONNECT;
8700 }
8701
8702 pci_set_master(pdev);
8703 s2io_reset(sp);
8704
8705 return PCI_ERS_RESULT_RECOVERED;
8706 }
8707
8708 /**
8709 * s2io_io_resume - called when traffic can start flowing again.
8710 * @pdev: Pointer to PCI device
8711 *
8712 * This callback is called when the error recovery driver tells
8713 * us that it's OK to resume normal operation.
8714 */
8715 static void s2io_io_resume(struct pci_dev *pdev)
8716 {
8717 struct net_device *netdev = pci_get_drvdata(pdev);
8718 struct s2io_nic *sp = netdev_priv(netdev);
8719
8720 if (netif_running(netdev)) {
8721 if (s2io_card_up(sp)) {
8722 pr_err("Can't bring device back up after reset.\n");
8723 return;
8724 }
8725
8726 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8727 s2io_card_down(sp);
8728 pr_err("Can't restore mac addr after reset.\n");
8729 return;
8730 }
8731 }
8732
8733 netif_device_attach(netdev);
8734 netif_tx_wake_all_queues(netdev);
8735 }