/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues, Default = 0");
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
                 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable, "
                 "Default = 0");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct of_device *dev,
                                        const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
    {
        .name = "lhea",
        .compatible = "IBM,lhea",
    },
    {},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
    .driver = {
        .name = "ehea",
        .owner = THIS_MODULE,
        .of_match_table = ehea_device_table,
    },
    .probe = ehea_probe_adapter,
    .remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
    int x;
    unsigned char *deb = adr;

    for (x = 0; x < len; x += 16) {
        printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
               deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
        deb += 16;
    }
}
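
/*
 * ehea_dump() is the debugging helper used throughout this file, e.g.
 * ehea_dump(cqe, sizeof(*cqe), "CQE") in the error paths below. It
 * prints the buffer in 16-byte rows, so callers should pass a length
 * that is safe to read up to the next 16-byte boundary.
 */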
void ehea_schedule_port_reset(struct ehea_port *port)
{
    if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
        schedule_work(&port->reset_task);
}
static void ehea_update_firmware_handles(void)
{
    struct ehea_fw_handle_entry *arr = NULL;
    struct ehea_adapter *adapter;
    int num_adapters = 0;
    int num_ports = 0;
    int num_portres = 0;
    int i = 0;
    int num_fw_handles, k, l;

    /* Determine number of handles */
    mutex_lock(&ehea_fw_handles.lock);

    list_for_each_entry(adapter, &adapter_list, list) {
        num_adapters++;

        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP))
                continue;

            num_ports++;
            num_portres += port->num_def_qps + port->num_add_tx_qps;
        }
    }

    num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                     num_ports * EHEA_NUM_PORT_FW_HANDLES +
                     num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

    if (num_fw_handles) {
        arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
        if (!arr)
            goto out;  /* Keep the existing array */
    } else
        goto out_update;

    list_for_each_entry(adapter, &adapter_list, list) {
        if (num_adapters == 0)
            break;

        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP) ||
                (num_ports == 0))
                continue;

            for (l = 0;
                 l < port->num_def_qps + port->num_add_tx_qps;
                 l++) {
                struct ehea_port_res *pr = &port->port_res[l];

                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->qp->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->send_cq->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->recv_cq->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->eq->fw_handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->send_mr.handle;
                arr[i].adh = adapter->handle;
                arr[i++].fwh = pr->recv_mr.handle;
            }
            arr[i].adh = adapter->handle;
            arr[i++].fwh = port->qp_eq->fw_handle;
            num_ports--;
        }

        arr[i].adh = adapter->handle;
        arr[i++].fwh = adapter->neq->fw_handle;

        if (adapter->mr.handle) {
            arr[i].adh = adapter->handle;
            arr[i++].fwh = adapter->mr.handle;
        }
        num_adapters--;
    }

out_update:
    kfree(ehea_fw_handles.arr);
    ehea_fw_handles.arr = arr;
    ehea_fw_handles.num_entries = i;
out:
    mutex_unlock(&ehea_fw_handles.lock);
}
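
/*
 * The array rebuilt above is a flat snapshot of every firmware object
 * handle (QPs, CQs, EQs, MRs) the driver currently owns, each paired
 * with its adapter handle. Shutdown paths that cannot safely walk the
 * live data structures (reboot/kexec/crash handling) use this snapshot
 * to hand every resource back to the hypervisor.
 */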
static void ehea_update_bcmc_registrations(void)
{
    unsigned long flags;
    struct ehea_bcmc_reg_entry *arr = NULL;
    struct ehea_adapter *adapter;
    struct ehea_mc_list *mc_entry;
    int num_registrations = 0;
    int i = 0;
    int k;

    spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

    /* Determine number of registrations */
    list_for_each_entry(adapter, &adapter_list, list)
        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP))
                continue;

            num_registrations += 2; /* Broadcast registrations */

            list_for_each_entry(mc_entry, &port->mc_list->list, list)
                num_registrations += 2;
        }

    if (num_registrations) {
        arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
        if (!arr)
            goto out;  /* Keep the existing array */
    } else
        goto out_update;

    list_for_each_entry(adapter, &adapter_list, list) {
        for (k = 0; k < EHEA_MAX_PORTS; k++) {
            struct ehea_port *port = adapter->port[k];

            if (!port || (port->state != EHEA_PORT_UP))
                continue;

            if (num_registrations == 0)
                goto out_update;

            arr[i].adh = adapter->handle;
            arr[i].port_id = port->logical_port_id;
            arr[i].reg_type = EHEA_BCMC_BROADCAST |
                              EHEA_BCMC_UNTAGGED;
            arr[i++].macaddr = port->mac_addr;

            arr[i].adh = adapter->handle;
            arr[i].port_id = port->logical_port_id;
            arr[i].reg_type = EHEA_BCMC_BROADCAST |
                              EHEA_BCMC_VLANID_ALL;
            arr[i++].macaddr = port->mac_addr;
            num_registrations -= 2;

            list_for_each_entry(mc_entry,
                                &port->mc_list->list, list) {
                if (num_registrations == 0)
                    goto out_update;

                arr[i].adh = adapter->handle;
                arr[i].port_id = port->logical_port_id;
                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                  EHEA_BCMC_MULTICAST |
                                  EHEA_BCMC_UNTAGGED;
                arr[i++].macaddr = mc_entry->macaddr;

                arr[i].adh = adapter->handle;
                arr[i].port_id = port->logical_port_id;
                arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
                                  EHEA_BCMC_MULTICAST |
                                  EHEA_BCMC_VLANID_ALL;
                arr[i++].macaddr = mc_entry->macaddr;
                num_registrations -= 2;
            }
        }
    }

out_update:
    kfree(ehea_bcmc_regs.arr);
    ehea_bcmc_regs.arr = arr;
    ehea_bcmc_regs.num_entries = i;
out:
    spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct net_device_stats *stats = &port->stats;
    struct hcp_ehea_port_cb2 *cb2;
    u64 hret, rx_packets, tx_packets;
    int i;

    memset(stats, 0, sizeof(*stats));

    cb2 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb2) {
        ehea_error("no mem for cb2");
        goto out;
    }

    hret = ehea_h_query_ehea_port(port->adapter->handle,
                                  port->logical_port_id,
                                  H_PORT_CB2, H_PORT_CB2_ALL, cb2);
    if (hret != H_SUCCESS) {
        ehea_error("query_ehea_port failed");
        goto out_herr;
    }

    if (netif_msg_hw(port))
        ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

    rx_packets = 0;
    for (i = 0; i < port->num_def_qps; i++)
        rx_packets += port->port_res[i].rx_packets;

    tx_packets = 0;
    for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
        tx_packets += port->port_res[i].tx_packets;

    stats->tx_packets = tx_packets;
    stats->multicast = cb2->rxmcp;
    stats->rx_errors = cb2->rxuerr;
    stats->rx_bytes = cb2->rxo;
    stats->tx_bytes = cb2->txo;
    stats->rx_packets = rx_packets;

out_herr:
    free_page((unsigned long)cb2);
out:
    return stats;
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
    struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
    struct net_device *dev = pr->port->netdev;
    int max_index_mask = pr->rq1_skba.len - 1;
    int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
    int adder = 0;
    int i;

    pr->rq1_skba.os_skbs = 0;

    if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
        pr->rq1_skba.index = index;
        pr->rq1_skba.os_skbs = fill_wqes;
        return;
    }

    for (i = 0; i < fill_wqes; i++) {
        if (!skb_arr_rq1[index]) {
            skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                  EHEA_L_PKT_SIZE);
            if (!skb_arr_rq1[index]) {
                pr->rq1_skba.os_skbs = fill_wqes - i;
                break;
            }
        }
        index--;
        index &= max_index_mask;
        adder++;
    }

    if (adder == 0)
        return;

    /* Ring doorbell */
    ehea_update_rq1a(pr->qp, adder);
}
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
    struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
    struct net_device *dev = pr->port->netdev;
    int i;

    for (i = 0; i < pr->rq1_skba.len; i++) {
        skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
        if (!skb_arr_rq1[i])
            break;
    }
    /* Ring doorbell */
    ehea_update_rq1a(pr->qp, nr_rq1a);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
    struct net_device *dev = pr->port->netdev;
    struct ehea_qp *qp = pr->qp;
    struct sk_buff **skb_arr = q_skba->arr;
    struct ehea_rwqe *rwqe;
    int i, index, max_index_mask, fill_wqes;
    int adder = 0;
    int ret = 0;

    fill_wqes = q_skba->os_skbs + num_wqes;
    q_skba->os_skbs = 0;

    if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
        q_skba->os_skbs = fill_wqes;
        return ret;
    }

    index = q_skba->index;
    max_index_mask = q_skba->len - 1;
    for (i = 0; i < fill_wqes; i++) {
        u64 tmp_addr;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(dev, packet_size);
        if (!skb) {
            q_skba->os_skbs = fill_wqes - i;
            if (q_skba->os_skbs == q_skba->len - 2) {
                ehea_info("%s: rq%i ran dry - no mem for skb",
                          pr->port->netdev->name, rq_nr);
                ret = -ENOMEM;
            }
            break;
        }

        skb_arr[index] = skb;
        tmp_addr = ehea_map_vaddr(skb->data);
        if (tmp_addr == -1) {
            dev_kfree_skb(skb);
            q_skba->os_skbs = fill_wqes - i;
            break;
        }

        rwqe = ehea_get_next_rwqe(qp, rq_nr);
        rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
        rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
        rwqe->sg_list[0].vaddr = tmp_addr;
        rwqe->sg_list[0].len = packet_size;
        rwqe->data_segments = 1;

        index++;
        index &= max_index_mask;
        adder++;
    }

    q_skba->index = index;
    if (adder == 0)
        goto out;

    /* Ring doorbell */
    if (rq_nr == 2)
        ehea_update_rq2a(pr->qp, adder);
    else
        ehea_update_rq3a(pr->qp, adder);
out:
    return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
    return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                              nr_of_wqes, EHEA_RWQE2_TYPE,
                              EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
    return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                              nr_of_wqes, EHEA_RWQE3_TYPE,
                              EHEA_MAX_PACKET_SIZE);
}
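
/*
 * eHEA steers received frames into one of three receive queues by
 * size: RQ1 takes small, latency-critical frames (up to
 * EHEA_L_PKT_SIZE, delivered alongside the CQE and copied out in
 * ehea_proc_rwqes() below), RQ2 takes mid-sized frames, and RQ3 takes
 * everything up to EHEA_MAX_PACKET_SIZE. When an skb allocation fails,
 * the shortfall is remembered in os_skbs so a later refill can catch up.
 */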
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
    *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
    if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
        return 0;
    if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
        (cqe->header_length == 0))
        return 0;
    return -EINVAL;
}
static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe)
{
    int length = cqe->num_bytes_transfered - 4; /* remove CRC */

    skb_put(skb, length);
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    skb->protocol = eth_type_trans(skb, dev);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
    int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
    struct sk_buff *skb;
    void *pref;
    int x;

    x = skb_index + 1;
    x &= (arr_len - 1);

    pref = skb_array[x];
    prefetchw(pref);
    prefetchw(pref + EHEA_CACHE_LINE);

    pref = (skb_array[x]->data);
    prefetch(pref);
    prefetch(pref + EHEA_CACHE_LINE);
    prefetch(pref + EHEA_CACHE_LINE * 2);
    prefetch(pref + EHEA_CACHE_LINE * 3);

    skb = skb_array[skb_index];
    skb_array[skb_index] = NULL;
    return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
    struct sk_buff *skb;
    void *pref;
    int x;

    x = wqe_index + 1;
    x &= (arr_len - 1);

    pref = skb_array[x];
    prefetchw(pref);
    prefetchw(pref + EHEA_CACHE_LINE);

    pref = (skb_array[x]->data);
    prefetchw(pref);
    prefetchw(pref + EHEA_CACHE_LINE);

    skb = skb_array[wqe_index];
    skb_array[wqe_index] = NULL;
    return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
    struct sk_buff *skb;

    if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
        pr->p_stats.err_tcp_cksum++;
    if (cqe->status & EHEA_CQE_STAT_ERR_IP)
        pr->p_stats.err_ip_cksum++;
    if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
        pr->p_stats.err_frame_crc++;

    if (rq == 2) {
        *processed_rq2 += 1;
        skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
        dev_kfree_skb(skb);
    } else if (rq == 3) {
        *processed_rq3 += 1;
        skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
        dev_kfree_skb(skb);
    }

    if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
        if (netif_msg_rx_err(pr->port)) {
            ehea_error("Critical receive error for QP %d. "
                       "Resetting port.", pr->qp->init_attr.qp_nr);
            ehea_dump(cqe, sizeof(*cqe), "CQE");
        }
        ehea_schedule_port_reset(pr->port);
        return 1;
    }

    return 0;
}
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
                       void **tcph, u64 *hdr_flags, void *priv)
{
    struct ehea_cqe *cqe = priv;
    unsigned int ip_len;
    struct iphdr *iph;

    /* non tcp/udp packets */
    if (!cqe->header_length)
        return -1;

    /* non tcp packets */
    skb_reset_network_header(skb);
    iph = ip_hdr(skb);
    if (iph->protocol != IPPROTO_TCP)
        return -1;

    ip_len = ip_hdrlen(skb);
    skb_set_transport_header(skb, ip_len);
    *tcph = tcp_hdr(skb);

    /* check if ip header and tcp header are complete */
    if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
        return -1;

    *hdr_flags = LRO_IPV4 | LRO_TCP;
    *iphdr = iph;

    return 0;
}
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
                          struct sk_buff *skb)
{
    int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
                          pr->port->vgrp);

    if (use_lro) {
        if (vlan_extracted)
            lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
                                         pr->port->vgrp,
                                         cqe->vlan_tag,
                                         cqe);
        else
            lro_receive_skb(&pr->lro_mgr, skb, cqe);
    } else {
        if (vlan_extracted)
            vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
                                     cqe->vlan_tag);
        else
            netif_receive_skb(skb);
    }
}
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
    struct ehea_port *port = pr->port;
    struct ehea_qp *qp = pr->qp;
    struct ehea_cqe *cqe;
    struct sk_buff *skb;
    struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
    struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
    struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
    int skb_arr_rq1_len = pr->rq1_skba.len;
    int skb_arr_rq2_len = pr->rq2_skba.len;
    int skb_arr_rq3_len = pr->rq3_skba.len;
    int processed, processed_rq1, processed_rq2, processed_rq3;
    int wqe_index, last_wqe_index, rq, port_reset;

    processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
    last_wqe_index = 0;

    cqe = ehea_poll_rq1(qp, &wqe_index);
    while ((processed < budget) && cqe) {
        ehea_inc_rq1(qp);
        processed_rq1++;
        processed++;
        if (netif_msg_rx_status(port))
            ehea_dump(cqe, sizeof(*cqe), "CQE");

        last_wqe_index = wqe_index;
        if (!ehea_check_cqe(cqe, &rq)) {
            if (rq == 1) {
                /* LL RQ1 */
                skb = get_skb_by_index_ll(skb_arr_rq1,
                                          skb_arr_rq1_len,
                                          wqe_index);
                if (unlikely(!skb)) {
                    if (netif_msg_rx_err(port))
                        ehea_error("LL rq1: skb=NULL");

                    skb = netdev_alloc_skb(dev,
                                           EHEA_L_PKT_SIZE);
                    if (!skb)
                        break;
                }
                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                        cqe->num_bytes_transfered - 4);
                ehea_fill_skb(dev, skb, cqe);
            } else if (rq == 2) {
                /* RQ2 */
                skb = get_skb_by_index(skb_arr_rq2,
                                       skb_arr_rq2_len, cqe);
                if (unlikely(!skb)) {
                    if (netif_msg_rx_err(port))
                        ehea_error("rq2: skb=NULL");
                    break;
                }
                ehea_fill_skb(dev, skb, cqe);
                processed_rq2++;
            } else {
                /* RQ3 */
                skb = get_skb_by_index(skb_arr_rq3,
                                       skb_arr_rq3_len, cqe);
                if (unlikely(!skb)) {
                    if (netif_msg_rx_err(port))
                        ehea_error("rq3: skb=NULL");
                    break;
                }
                ehea_fill_skb(dev, skb, cqe);
                processed_rq3++;
            }

            ehea_proc_skb(pr, cqe, skb);
        } else {
            pr->p_stats.poll_receive_errors++;
            port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                               &processed_rq2,
                                               &processed_rq3);
            if (port_reset)
                break;
        }
        cqe = ehea_poll_rq1(qp, &wqe_index);
    }
    if (use_lro)
        lro_flush_all(&pr->lro_mgr);

    pr->rx_packets += processed;

    ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
    ehea_refill_rq2(pr, processed_rq2);
    ehea_refill_rq3(pr, processed_rq3);

    return processed;
}
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
    struct sk_buff *skb;
    struct ehea_cq *send_cq = pr->send_cq;
    struct ehea_cqe *cqe;
    int quota = my_quota;
    int cqe_counter = 0;
    int swqe_av = 0;
    int index;
    unsigned long flags;

    cqe = ehea_poll_cq(send_cq);
    while (cqe && (quota > 0)) {
        ehea_inc_cq(send_cq);

        cqe_counter++;
        if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
            ehea_error("Bad send completion status=0x%04X",
                       cqe->status);

            if (netif_msg_tx_err(pr->port))
                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

            if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                ehea_error("Resetting port");
                ehea_schedule_port_reset(pr->port);
                break;
            }
        }

        if (netif_msg_tx_done(pr->port))
            ehea_dump(cqe, sizeof(*cqe), "CQE");

        if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                   == EHEA_SWQE2_TYPE)) {

            index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
            skb = pr->sq_skba.arr[index];
            dev_kfree_skb(skb);
            pr->sq_skba.arr[index] = NULL;
        }

        swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
        quota--;

        cqe = ehea_poll_cq(send_cq);
    }

    ehea_update_feca(send_cq, cqe_counter);
    atomic_add(swqe_av, &pr->swqe_avail);

    spin_lock_irqsave(&pr->netif_queue, flags);

    if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
                              >= pr->swqe_refill_th)) {
        netif_wake_queue(pr->port->netdev);
        pr->queue_stopped = 0;
    }
    spin_unlock_irqrestore(&pr->netif_queue, flags);

    return cqe;
}
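
/*
 * Send-side flow control: ehea_start_xmit() decrements swqe_avail for
 * every posted WQE and stops the queue when it runs out; the completion
 * path above credits swqe_avail back and wakes the queue once at least
 * swqe_refill_th entries are free. Only signalled WQEs generate CQEs,
 * so a single completion can return credit for a whole batch - the
 * EHEA_WR_ID_REFILL field carries the batch size.
 */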
#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
    struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                            napi);
    struct net_device *dev = pr->port->netdev;
    struct ehea_cqe *cqe;
    struct ehea_cqe *cqe_skb = NULL;
    int force_irq, wqe_index;
    int rx = 0;

    force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
    cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

    if (!force_irq)
        rx += ehea_proc_rwqes(dev, pr, budget - rx);

    while ((rx != budget) || force_irq) {
        pr->poll_counter = 0;
        force_irq = 0;
        napi_complete(napi);
        ehea_reset_cq_ep(pr->recv_cq);
        ehea_reset_cq_ep(pr->send_cq);
        ehea_reset_cq_n1(pr->recv_cq);
        ehea_reset_cq_n1(pr->send_cq);
        cqe = ehea_poll_rq1(pr->qp, &wqe_index);
        cqe_skb = ehea_poll_cq(pr->send_cq);

        if (!cqe && !cqe_skb)
            return rx;

        if (!napi_reschedule(napi))
            return rx;

        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(dev, pr, budget - rx);
    }

    pr->poll_counter++;
    return rx;
}
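
/*
 * ehea_poll() switches between polling and interrupt mode: after
 * EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive invocations it forces an
 * exit from NAPI, re-arming CQ event generation first. The re-check of
 * both CQs after napi_complete() closes the race with events that
 * arrived while notifications were still disabled; if anything is
 * pending, napi_reschedule() pulls the handler straight back in
 * instead of losing the event.
 */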
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    int i;

    for (i = 0; i < port->num_def_qps; i++)
        napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
    struct ehea_port_res *pr = param;

    napi_schedule(&pr->napi);

    return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
    struct ehea_port *port = param;
    struct ehea_qp *qp;
    struct ehea_eqe *eqe;
    u32 qp_token;
    u64 resource_type, aer, aerr;
    int reset_port = 0;

    eqe = ehea_poll_eq(port->qp_eq);

    while (eqe) {
        qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
        ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
                   eqe->entry, qp_token);

        qp = port->port_res[qp_token].qp;

        resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                        &aer, &aerr);

        if (resource_type == EHEA_AER_RESTYPE_QP) {
            if ((aer & EHEA_AER_RESET_MASK) ||
                (aerr & EHEA_AERR_RESET_MASK))
                reset_port = 1;
        } else
            reset_port = 1; /* Reset in case of CQ or EQ error */

        eqe = ehea_poll_eq(port->qp_eq);
    }

    if (reset_port) {
        ehea_error("Resetting port");
        ehea_schedule_port_reset(port);
    }

    return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
    int i;

    for (i = 0; i < EHEA_MAX_PORTS; i++)
        if (adapter->port[i])
            if (adapter->port[i]->logical_port_id == logical_port)
                return adapter->port[i];
    return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
    int ret;
    u64 hret;
    struct hcp_ehea_port_cb0 *cb0;

    /* may be called via ehea_neq_tasklet() */
    cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
    if (!cb0) {
        ehea_error("no mem for cb0");
        ret = -ENOMEM;
        goto out;
    }

    hret = ehea_h_query_ehea_port(port->adapter->handle,
                                  port->logical_port_id, H_PORT_CB0,
                                  EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                  cb0);
    if (hret != H_SUCCESS) {
        ret = -EIO;
        goto out_free;
    }

    /* MAC address */
    port->mac_addr = cb0->port_mac_addr << 16;

    if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
        ret = -EADDRNOTAVAIL;
        goto out_free;
    }

    /* Port speed */
    switch (cb0->port_speed) {
    case H_SPEED_10M_H:
        port->port_speed = EHEA_SPEED_10M;
        port->full_duplex = 0;
        break;
    case H_SPEED_10M_F:
        port->port_speed = EHEA_SPEED_10M;
        port->full_duplex = 1;
        break;
    case H_SPEED_100M_H:
        port->port_speed = EHEA_SPEED_100M;
        port->full_duplex = 0;
        break;
    case H_SPEED_100M_F:
        port->port_speed = EHEA_SPEED_100M;
        port->full_duplex = 1;
        break;
    case H_SPEED_1G_F:
        port->port_speed = EHEA_SPEED_1G;
        port->full_duplex = 1;
        break;
    case H_SPEED_10G_F:
        port->port_speed = EHEA_SPEED_10G;
        port->full_duplex = 1;
        break;
    default:
        port->port_speed = 0;
        port->full_duplex = 0;
        break;
    }

    port->autoneg = 1;
    port->num_mcs = cb0->num_default_qps;

    /* Number of default QPs */
    if (use_mcs)
        port->num_def_qps = cb0->num_default_qps;
    else
        port->num_def_qps = 1;

    if (!port->num_def_qps) {
        ret = -EINVAL;
        goto out_free;
    }

    port->num_tx_qps = num_tx_qps;

    if (port->num_def_qps >= port->num_tx_qps)
        port->num_add_tx_qps = 0;
    else
        port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

    ret = 0;
out_free:
    if (ret || netif_msg_probe(port))
        ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
    free_page((unsigned long)cb0);
out:
    return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
    struct hcp_ehea_port_cb4 *cb4;
    u64 hret;
    int ret = 0;

    cb4 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb4) {
        ehea_error("no mem for cb4");
        ret = -ENOMEM;
        goto out;
    }

    cb4->port_speed = port_speed;

    netif_carrier_off(port->netdev);

    hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                   port->logical_port_id,
                                   H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
    if (hret == H_SUCCESS) {
        port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB4, H_PORT_CB4_SPEED,
                                      cb4);
        if (hret == H_SUCCESS) {
            switch (cb4->port_speed) {
            case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
            case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
            case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
            case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
            case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
            case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
            default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
            }
        } else {
            ehea_error("Failed sensing port speed");
            ret = -EIO;
        }
    } else {
        if (hret == H_AUTHORITY) {
            ehea_info("Hypervisor denied setting port speed");
            ret = -EPERM;
        } else {
            ret = -EIO;
            ehea_error("Failed setting port speed");
        }
    }
    if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
        netif_carrier_on(port->netdev);

    free_page((unsigned long)cb4);
out:
    return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
    int ret;
    u8 ec;
    u8 portnum;
    struct ehea_port *port;

    ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
    portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
    port = ehea_get_port(adapter, portnum);

    switch (ec) {
    case EHEA_EC_PORTSTATE_CHG: /* port state change */

        if (!port) {
            ehea_error("unknown portnum %x", portnum);
            break;
        }

        if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
            if (!netif_carrier_ok(port->netdev)) {
                ret = ehea_sense_port_attr(port);
                if (ret) {
                    ehea_error("failed resensing port "
                               "attributes");
                    break;
                }

                if (netif_msg_link(port))
                    ehea_info("%s: Logical port up: %dMbps "
                              "%s Duplex",
                              port->netdev->name,
                              port->port_speed,
                              port->full_duplex ==
                              1 ? "Full" : "Half");

                netif_carrier_on(port->netdev);
                netif_wake_queue(port->netdev);
            }
        } else
            if (netif_carrier_ok(port->netdev)) {
                if (netif_msg_link(port))
                    ehea_info("%s: Logical port down",
                              port->netdev->name);
                netif_carrier_off(port->netdev);
                netif_stop_queue(port->netdev);
            }

        if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
            port->phy_link = EHEA_PHY_LINK_UP;
            if (netif_msg_link(port))
                ehea_info("%s: Physical port up",
                          port->netdev->name);
            if (prop_carrier_state)
                netif_carrier_on(port->netdev);
        } else {
            port->phy_link = EHEA_PHY_LINK_DOWN;
            if (netif_msg_link(port))
                ehea_info("%s: Physical port down",
                          port->netdev->name);
            if (prop_carrier_state)
                netif_carrier_off(port->netdev);
        }

        if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
            ehea_info("External switch port is primary port");
        else
            ehea_info("External switch port is backup port");

        break;
    case EHEA_EC_ADAPTER_MALFUNC:
        ehea_error("Adapter malfunction");
        break;
    case EHEA_EC_PORT_MALFUNC:
        ehea_info("Port malfunction: Device: %s", port->netdev->name);
        netif_carrier_off(port->netdev);
        netif_stop_queue(port->netdev);
        break;
    default:
        ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
        break;
    }
}
static void ehea_neq_tasklet(unsigned long data)
{
    struct ehea_adapter *adapter = (struct ehea_adapter *)data;
    struct ehea_eqe *eqe;
    u64 event_mask;

    eqe = ehea_poll_eq(adapter->neq);
    ehea_debug("eqe=%p", eqe);

    while (eqe) {
        ehea_debug("*eqe=%lx", eqe->entry);
        ehea_parse_eqe(adapter, eqe->entry);
        eqe = ehea_poll_eq(adapter->neq);
        ehea_debug("next eqe=%p", eqe);
    }

    event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
               | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
               | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

    ehea_h_reset_events(adapter->handle,
                        adapter->neq->fw_handle, event_mask);
}
static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
    struct ehea_adapter *adapter = param;

    tasklet_hi_schedule(&adapter->neq_tasklet);
    return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
    int ret;
    struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

    ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
                           - init_attr->act_nr_rwqes_rq2
                           - init_attr->act_nr_rwqes_rq3 - 1);

    ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

    ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

    return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_port_res *pr;
    int i, ret;


    snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
             dev->name);

    ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                              ehea_qp_aff_irq_handler,
                              IRQF_DISABLED, port->int_aff_name, port);
    if (ret) {
        ehea_error("failed registering irq for qp_aff_irq_handler:"
                   "ist=%X", port->qp_eq->attr.ist1);
        goto out_free_qpeq;
    }

    if (netif_msg_ifup(port))
        ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
                  "registered", port->qp_eq->attr.ist1);


    for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
        pr = &port->port_res[i];
        snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                 "%s-queue%d", dev->name, i);
        ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                  ehea_recv_irq_handler,
                                  IRQF_DISABLED, pr->int_send_name,
                                  pr);
        if (ret) {
            ehea_error("failed registering irq for ehea_queue "
                       "port_res_nr:%d, ist=%X", i,
                       pr->eq->attr.ist1);
            goto out_free_req;
        }
        if (netif_msg_ifup(port))
            ehea_info("irq_handle 0x%X for function ehea_queue_int "
                      "%d registered", pr->eq->attr.ist1, i);
    }
out:
    return ret;


out_free_req:
    while (--i >= 0) {
        u32 ist = port->port_res[i].eq->attr.ist1;
        ibmebus_free_irq(ist, &port->port_res[i]);
    }

out_free_qpeq:
    ibmebus_free_irq(port->qp_eq->attr.ist1, port);
    i = port->num_def_qps;

    goto out;

}
static void ehea_free_interrupts(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_port_res *pr;
    int i;

    /* send */

    for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
        pr = &port->port_res[i];
        ibmebus_free_irq(pr->eq->attr.ist1, pr);
        if (netif_msg_intr(port))
            ehea_info("free send irq for res %d with handle 0x%X",
                      i, pr->eq->attr.ist1);
    }

    /* associated events */
    ibmebus_free_irq(port->qp_eq->attr.ist1, port);
    if (netif_msg_intr(port))
        ehea_info("associated event interrupt for handle 0x%X freed",
                  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
    int ret, i;
    u64 hret, mask;
    struct hcp_ehea_port_cb0 *cb0;

    ret = -ENOMEM;
    cb0 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb0)
        goto out;

    cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                 | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                 | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                 | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                 | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                  PXLY_RC_VLAN_FILTER)
                 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

    for (i = 0; i < port->num_mcs; i++)
        if (use_mcs)
            cb0->default_qpn_arr[i] =
                port->port_res[i].qp->init_attr.qp_nr;
        else
            cb0->default_qpn_arr[i] =
                port->port_res[0].qp->init_attr.qp_nr;

    if (netif_msg_ifup(port))
        ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

    mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
         | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

    hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                   port->logical_port_id,
                                   H_PORT_CB0, mask, cb0);
    ret = -EIO;
    if (hret != H_SUCCESS)
        goto out_free;

    ret = 0;

out_free:
    free_page((unsigned long)cb0);
out:
    return ret;
}
int ehea_gen_smrs(struct ehea_port_res *pr)
{
    int ret;
    struct ehea_adapter *adapter = pr->port->adapter;

    ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
    if (ret)
        goto out;

    ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
    if (ret)
        goto out_free;

    return 0;

out_free:
    ehea_rem_mr(&pr->send_mr);
out:
    ehea_error("Generating SMRS failed\n");
    return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
    if ((ehea_rem_mr(&pr->send_mr)) ||
        (ehea_rem_mr(&pr->recv_mr)))
        return -EIO;
    else
        return 0;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
    int arr_size = sizeof(void *) * max_q_entries;

    q_skba->arr = vmalloc(arr_size);
    if (!q_skba->arr)
        return -ENOMEM;

    memset(q_skba->arr, 0, arr_size);

    q_skba->len = max_q_entries;
    q_skba->index = 0;
    q_skba->os_skbs = 0;

    return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                              struct port_res_cfg *pr_cfg, int queue_token)
{
    struct ehea_adapter *adapter = port->adapter;
    enum ehea_eq_type eq_type = EHEA_EQ;
    struct ehea_qp_init_attr *init_attr = NULL;
    int ret = -EIO;

    memset(pr, 0, sizeof(struct ehea_port_res));

    pr->port = port;
    spin_lock_init(&pr->xmit_lock);
    spin_lock_init(&pr->netif_queue);

    pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
    if (!pr->eq) {
        ehea_error("create_eq failed (eq)");
        goto out_free;
    }

    pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
                                 pr->eq->fw_handle,
                                 port->logical_port_id);
    if (!pr->recv_cq) {
        ehea_error("create_cq failed (cq_recv)");
        goto out_free;
    }

    pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
                                 pr->eq->fw_handle,
                                 port->logical_port_id);
    if (!pr->send_cq) {
        ehea_error("create_cq failed (cq_send)");
        goto out_free;
    }

    if (netif_msg_ifup(port))
        ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
                  pr->send_cq->attr.act_nr_of_cqes,
                  pr->recv_cq->attr.act_nr_of_cqes);

    init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
    if (!init_attr) {
        ret = -ENOMEM;
        ehea_error("no mem for ehea_qp_init_attr");
        goto out_free;
    }

    init_attr->low_lat_rq1 = 1;
    init_attr->signalingtype = 1; /* generate CQE if specified in WQE */
    init_attr->rq_count = 3;
    init_attr->qp_token = queue_token;
    init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
    init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
    init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
    init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
    init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
    init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
    init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
    init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
    init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
    init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
    init_attr->port_nr = port->logical_port_id;
    init_attr->send_cq_handle = pr->send_cq->fw_handle;
    init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
    init_attr->aff_eq_handle = port->qp_eq->fw_handle;

    pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
    if (!pr->qp) {
        ehea_error("create_qp failed");
        ret = -EIO;
        goto out_free;
    }

    if (netif_msg_ifup(port))
        ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
                  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
                  init_attr->act_nr_send_wqes,
                  init_attr->act_nr_rwqes_rq1,
                  init_attr->act_nr_rwqes_rq2,
                  init_attr->act_nr_rwqes_rq3);

    pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

    ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
    ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
    ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
    ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
    if (ret)
        goto out_free;

    pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
    if (ehea_gen_smrs(pr) != 0) {
        ret = -EIO;
        goto out_free;
    }

    atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

    kfree(init_attr);

    netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

    pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
    pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
    pr->lro_mgr.lro_arr = pr->lro_desc;
    pr->lro_mgr.get_skb_header = get_skb_hdr;
    pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
    pr->lro_mgr.dev = port->netdev;
    pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
    pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

    ret = 0;
    goto out;

out_free:
    kfree(init_attr);
    vfree(pr->sq_skba.arr);
    vfree(pr->rq1_skba.arr);
    vfree(pr->rq2_skba.arr);
    vfree(pr->rq3_skba.arr);
    ehea_destroy_qp(pr->qp);
    ehea_destroy_cq(pr->send_cq);
    ehea_destroy_cq(pr->recv_cq);
    ehea_destroy_eq(pr->eq);
out:
    return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
    int ret, i;

    netif_napi_del(&pr->napi);

    ret = ehea_destroy_qp(pr->qp);

    if (!ret) {
        ehea_destroy_cq(pr->send_cq);
        ehea_destroy_cq(pr->recv_cq);
        ehea_destroy_eq(pr->eq);

        for (i = 0; i < pr->rq1_skba.len; i++)
            if (pr->rq1_skba.arr[i])
                dev_kfree_skb(pr->rq1_skba.arr[i]);

        for (i = 0; i < pr->rq2_skba.len; i++)
            if (pr->rq2_skba.arr[i])
                dev_kfree_skb(pr->rq2_skba.arr[i]);

        for (i = 0; i < pr->rq3_skba.len; i++)
            if (pr->rq3_skba.arr[i])
                dev_kfree_skb(pr->rq3_skba.arr[i]);

        for (i = 0; i < pr->sq_skba.len; i++)
            if (pr->sq_skba.arr[i])
                dev_kfree_skb(pr->sq_skba.arr[i]);

        vfree(pr->rq1_skba.arr);
        vfree(pr->rq2_skba.arr);
        vfree(pr->rq3_skba.arr);
        vfree(pr->sq_skba.arr);
        ret = ehea_rem_smrs(pr);
    }
    return ret;
}
/*
 * The write_* functions store information in the swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum.
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
                                      const struct sk_buff *skb)
{
    swqe->ip_start = skb_network_offset(skb);
    swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
    swqe->tcp_offset =
        (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

    swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
    swqe->tcp_offset =
        (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

    swqe->tcp_end = (u16)skb->len - 1;
}
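
/*
 * Worked example of the offsets these helpers produce, assuming an
 * untagged Ethernet frame with a 20-byte IPv4 header and a TCP payload:
 *
 *     ip_start   = 14                  (skb_network_offset())
 *     ip_end     = 14 + 20 - 1 = 33    (last byte of the IP header)
 *     tcp_offset = 34 + 16 = 50        (offsetof(struct tcphdr, check))
 *     tcp_end    = skb->len - 1        (checksum covers the rest)
 *
 * For UDP the same two fields are reused, with tcp_offset pointing at
 * the UDP checksum instead (offsetof(struct udphdr, check) == 6).
 */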
static void write_swqe2_TSO(struct sk_buff *skb,
                            struct ehea_swqe *swqe, u32 lkey)
{
    struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
    u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
    int skb_data_size = skb_headlen(skb);
    int headersize;

    /* Packet is TCP with TSO enabled */
    swqe->tx_control |= EHEA_SWQE_TSO;
    swqe->mss = skb_shinfo(skb)->gso_size;
    /* copy only eth/ip/tcp headers to immediate data and
     * the rest of skb->data to sg1entry
     */
    headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

    skb_data_size = skb_headlen(skb);

    if (skb_data_size >= headersize) {
        /* copy immediate data */
        skb_copy_from_linear_data(skb, imm_data, headersize);
        swqe->immediate_data_length = headersize;

        if (skb_data_size > headersize) {
            /* set sg1entry data */
            sg1entry->l_key = lkey;
            sg1entry->len = skb_data_size - headersize;
            sg1entry->vaddr =
                ehea_map_vaddr(skb->data + headersize);
            swqe->descriptors++;
        }
    } else
        ehea_error("cannot handle fragmented headers");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
                               struct ehea_swqe *swqe, u32 lkey)
{
    int skb_data_size = skb_headlen(skb);
    u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
    struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

    /* Packet is any nonTSO type
     *
     * Copy as much as possible skb->data to immediate data and
     * the rest to sg1entry
     */
    if (skb_data_size >= SWQE2_MAX_IMM) {
        /* copy immediate data */
        skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

        swqe->immediate_data_length = SWQE2_MAX_IMM;

        if (skb_data_size > SWQE2_MAX_IMM) {
            /* copy sg1entry data */
            sg1entry->l_key = lkey;
            sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
            sg1entry->vaddr =
                ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
            swqe->descriptors++;
        }
    } else {
        skb_copy_from_linear_data(skb, imm_data, skb_data_size);
        swqe->immediate_data_length = skb_data_size;
    }
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
                                    struct ehea_swqe *swqe, u32 lkey)
{
    struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
    skb_frag_t *frag;
    int nfrags, sg1entry_contains_frag_data, i;

    nfrags = skb_shinfo(skb)->nr_frags;
    sg1entry = &swqe->u.immdata_desc.sg_entry;
    sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
    swqe->descriptors = 0;
    sg1entry_contains_frag_data = 0;

    if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
        write_swqe2_TSO(skb, swqe, lkey);
    else
        write_swqe2_nonTSO(skb, swqe, lkey);

    /* write descriptors */
    if (nfrags > 0) {
        if (swqe->descriptors == 0) {
            /* sg1entry not yet used */
            frag = &skb_shinfo(skb)->frags[0];

            /* copy sg1entry data */
            sg1entry->l_key = lkey;
            sg1entry->len = frag->size;
            sg1entry->vaddr =
                ehea_map_vaddr(page_address(frag->page)
                               + frag->page_offset);
            swqe->descriptors++;
            sg1entry_contains_frag_data = 1;
        }

        for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

            frag = &skb_shinfo(skb)->frags[i];
            sgentry = &sg_list[i - sg1entry_contains_frag_data];

            sgentry->l_key = lkey;
            sgentry->len = frag->size;
            sgentry->vaddr =
                ehea_map_vaddr(page_address(frag->page)
                               + frag->page_offset);
            swqe->descriptors++;
        }
    }
}
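
/*
 * SWQE2 layout, as used above: up to SWQE2_MAX_IMM bytes of the linear
 * skb data travel inline in the work request ("immediate data"), while
 * the rest of the linear area and every page fragment are described by
 * scatter-gather entries the adapter DMAs directly, so large sends copy
 * at most the headers.
 */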
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
    int ret = 0;
    u64 hret;
    u8 reg_type;

    /* De/Register untagged packets */
    reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
    hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
                                 port->logical_port_id,
                                 reg_type, port->mac_addr, 0, hcallid);
    if (hret != H_SUCCESS) {
        ehea_error("%sregistering bc address failed (tagged)",
                   hcallid == H_REG_BCMC ? "" : "de");
        ret = -EIO;
        goto out_herr;
    }

    /* De/Register VLAN packets */
    reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
    hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
                                 port->logical_port_id,
                                 reg_type, port->mac_addr, 0, hcallid);
    if (hret != H_SUCCESS) {
        ehea_error("%sregistering bc address failed (vlan)",
                   hcallid == H_REG_BCMC ? "" : "de");
        ret = -EIO;
    }
out_herr:
    return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
    struct ehea_port *port = netdev_priv(dev);
    struct sockaddr *mac_addr = sa;
    struct hcp_ehea_port_cb0 *cb0;
    int ret;
    u64 hret;

    if (!is_valid_ether_addr(mac_addr->sa_data)) {
        ret = -EADDRNOTAVAIL;
        goto out;
    }

    cb0 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb0) {
        ehea_error("no mem for cb0");
        ret = -ENOMEM;
        goto out;
    }

    memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

    cb0->port_mac_addr = cb0->port_mac_addr >> 16;

    hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                   port->logical_port_id, H_PORT_CB0,
                                   EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
    if (hret != H_SUCCESS) {
        ret = -EIO;
        goto out_free;
    }

    memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

    /* Deregister old MAC in pHYP */
    if (port->state == EHEA_PORT_UP) {
        ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
        if (ret)
            goto out_upregs;
    }

    port->mac_addr = cb0->port_mac_addr << 16;

    /* Register new MAC in pHYP */
    if (port->state == EHEA_PORT_UP) {
        ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
        if (ret)
            goto out_upregs;
    }

    ret = 0;

out_upregs:
    ehea_update_bcmc_registrations();
out_free:
    free_page((unsigned long)cb0);
out:
    return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
    if (hret == H_AUTHORITY)
        ehea_info("Hypervisor denied %sabling promiscuous mode",
                  enable == 1 ? "en" : "dis");
    else
        ehea_error("failed %sabling promiscuous mode",
                   enable == 1 ? "en" : "dis");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
{
    struct ehea_port *port = netdev_priv(dev);
    struct hcp_ehea_port_cb7 *cb7;
    u64 hret;

    if ((enable && port->promisc) || (!enable && !port->promisc))
        return;

    cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
    if (!cb7) {
        ehea_error("no mem for cb7");
        goto out;
    }

    /* Modify Pxs_DUCQPN in CB7 */
    cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

    hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                   port->logical_port_id,
                                   H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
    if (hret) {
        ehea_promiscuous_error(hret, enable);
        goto out;
    }

    port->promisc = enable;
out:
    free_page((unsigned long)cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
                                     u32 hcallid)
{
    u64 hret;
    u8 reg_type;

    reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
             | EHEA_BCMC_UNTAGGED;

    hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
                                 port->logical_port_id,
                                 reg_type, mc_mac_addr, 0, hcallid);
    if (hret)
        goto out;

    reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
             | EHEA_BCMC_VLANID_ALL;

    hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
                                 port->logical_port_id,
                                 reg_type, mc_mac_addr, 0, hcallid);
out:
    return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_mc_list *mc_entry = port->mc_list;
    struct list_head *pos;
    struct list_head *temp;
    int ret = 0;
    u64 hret;

    list_for_each_safe(pos, temp, &(port->mc_list->list)) {
        mc_entry = list_entry(pos, struct ehea_mc_list, list);

        hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
                                         H_DEREG_BCMC);
        if (hret) {
            ehea_error("failed deregistering mcast MAC");
            ret = -EIO;
        }

        list_del(pos);
        kfree(mc_entry);
    }
    return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
    struct ehea_port *port = netdev_priv(dev);
    u64 hret;

    if (!port->allmulti) {
        if (enable) {
            /* Enable ALLMULTI */
            ehea_drop_multicast_list(dev);
            hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
            if (!hret)
                port->allmulti = 1;
            else
                ehea_error("failed enabling IFF_ALLMULTI");
        }
    } else
        if (!enable) {
            /* Disable ALLMULTI */
            hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
            if (!hret)
                port->allmulti = 0;
            else
                ehea_error("failed disabling IFF_ALLMULTI");
        }
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
    struct ehea_mc_list *ehea_mcl_entry;
    u64 hret;

    ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
    if (!ehea_mcl_entry) {
        ehea_error("no mem for mcl_entry");
        return;
    }

    INIT_LIST_HEAD(&ehea_mcl_entry->list);

    memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

    hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
                                     H_REG_BCMC);
    if (!hret)
        list_add(&ehea_mcl_entry->list, &port->mc_list->list);
    else {
        ehea_error("failed registering mcast MAC");
        kfree(ehea_mcl_entry);
    }
}
static void ehea_set_multicast_list(struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct netdev_hw_addr *ha;
    int ret;

    if (dev->flags & IFF_PROMISC) {
        ehea_promiscuous(dev, 1);
        return;
    }
    ehea_promiscuous(dev, 0);

    if (dev->flags & IFF_ALLMULTI) {
        ehea_allmulti(dev, 1);
        goto out;
    }
    ehea_allmulti(dev, 0);

    if (!netdev_mc_empty(dev)) {
        ret = ehea_drop_multicast_list(dev);
        if (ret) {
            /* Dropping the current multicast list failed.
             * Enabling ALL_MULTI is the best we can do.
             */
            ehea_allmulti(dev, 1);
        }

        if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
            ehea_info("Mcast registration limit reached (0x%llx). "
                      "Use ALLMULTI!",
                      port->adapter->max_mc_mac);
            goto out;
        }

        netdev_for_each_mc_addr(ha, dev)
            ehea_add_multicast_entry(port, ha->addr);

    }
out:
    ehea_update_bcmc_registrations();
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
    if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
        return -EINVAL;
    dev->mtu = new_mtu;
    return 0;
}
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
                       struct ehea_swqe *swqe, u32 lkey)
{
    if (skb->protocol == htons(ETH_P_IP)) {
        const struct iphdr *iph = ip_hdr(skb);

        /* IPv4 */
        swqe->tx_control |= EHEA_SWQE_CRC
                         | EHEA_SWQE_IP_CHECKSUM
                         | EHEA_SWQE_TCP_CHECKSUM
                         | EHEA_SWQE_IMM_DATA_PRESENT
                         | EHEA_SWQE_DESCRIPTORS_PRESENT;

        write_ip_start_end(swqe, skb);

        if (iph->protocol == IPPROTO_UDP) {
            if ((iph->frag_off & IP_MF) ||
                (iph->frag_off & IP_OFFSET))
                /* IP fragment, so don't change cs */
                swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
            else
                write_udp_offset_end(swqe, skb);
        } else if (iph->protocol == IPPROTO_TCP) {
            write_tcp_offset_end(swqe, skb);
        }

        /* icmp (big data) and ip segmentation packets (all other ip
           packets) do not require any special handling */

    } else {
        /* Other Ethernet Protocol */
        swqe->tx_control |= EHEA_SWQE_CRC
                         | EHEA_SWQE_IMM_DATA_PRESENT
                         | EHEA_SWQE_DESCRIPTORS_PRESENT;
    }

    write_swqe2_data(skb, dev, swqe, lkey);
}
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
                       struct ehea_swqe *swqe)
{
    int nfrags = skb_shinfo(skb)->nr_frags;
    u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
    skb_frag_t *frag;
    int i;

    if (skb->protocol == htons(ETH_P_IP)) {
        const struct iphdr *iph = ip_hdr(skb);

        /* IPv4 */
        write_ip_start_end(swqe, skb);

        if (iph->protocol == IPPROTO_TCP) {
            swqe->tx_control |= EHEA_SWQE_CRC
                             | EHEA_SWQE_IP_CHECKSUM
                             | EHEA_SWQE_TCP_CHECKSUM
                             | EHEA_SWQE_IMM_DATA_PRESENT;

            write_tcp_offset_end(swqe, skb);

        } else if (iph->protocol == IPPROTO_UDP) {
            if ((iph->frag_off & IP_MF) ||
                (iph->frag_off & IP_OFFSET))
                /* IP fragment, so don't change cs */
                swqe->tx_control |= EHEA_SWQE_CRC
                                 | EHEA_SWQE_IMM_DATA_PRESENT;
            else {
                swqe->tx_control |= EHEA_SWQE_CRC
                                 | EHEA_SWQE_IP_CHECKSUM
                                 | EHEA_SWQE_TCP_CHECKSUM
                                 | EHEA_SWQE_IMM_DATA_PRESENT;

                write_udp_offset_end(swqe, skb);
            }
        } else {
            /* icmp (big data) and
               ip segmentation packets (all other ip packets) */
            swqe->tx_control |= EHEA_SWQE_CRC
                             | EHEA_SWQE_IP_CHECKSUM
                             | EHEA_SWQE_IMM_DATA_PRESENT;
        }
    } else {
        /* Other Ethernet Protocol */
        swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
    }
    /* copy (immediate) data */
    if (nfrags == 0) {
        /* data is in a single piece */
        skb_copy_from_linear_data(skb, imm_data, skb->len);
    } else {
        /* first copy data from the skb->data buffer ... */
        skb_copy_from_linear_data(skb, imm_data,
                                  skb_headlen(skb));
        imm_data += skb_headlen(skb);

        /* ... then copy data from the fragments */
        for (i = 0; i < nfrags; i++) {
            frag = &skb_shinfo(skb)->frags[i];
            memcpy(imm_data,
                   page_address(frag->page) + frag->page_offset,
                   frag->size);
            imm_data += frag->size;
        }
    }
    swqe->immediate_data_length = skb->len;
    dev_kfree_skb(skb);
}
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
    struct tcphdr *tcp;
    u32 tmp;

    if ((skb->protocol == htons(ETH_P_IP)) &&
        (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
        tcp = (struct tcphdr *)(skb_network_header(skb) +
                                (ip_hdr(skb)->ihl * 4));
        tmp = (tcp->source + (tcp->dest << 16)) % 31;
        tmp += ip_hdr(skb)->daddr % 31;
        return tmp % num_qps;
    } else
        return 0;
}
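
/*
 * The hash above folds the TCP port pair and the destination address
 * into a queue index, so one flow always maps to the same send queue
 * and its packets stay in order. A minimal sketch of the same
 * arithmetic with made-up example values (plain C, outside any kernel
 * context):
 *
 *     u32 source = 47000, dest = 80;      // TCP ports
 *     u32 daddr = 0x0a000001;             // 10.0.0.1
 *     u32 tmp = (source + (dest << 16)) % 31;
 *     tmp += daddr % 31;
 *     int queue = tmp % num_qps;          // num_qps e.g. 4
 *
 * Everything that is not TCP over IPv4 is pinned to queue 0.
 */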
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_swqe *swqe;
    unsigned long flags;
    u32 lkey;
    int swqe_index;
    struct ehea_port_res *pr;

    pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

    if (!spin_trylock(&pr->xmit_lock))
        return NETDEV_TX_BUSY;

    if (pr->queue_stopped) {
        spin_unlock(&pr->xmit_lock);
        return NETDEV_TX_BUSY;
    }

    swqe = ehea_get_swqe(pr->qp, &swqe_index);
    memset(swqe, 0, SWQE_HEADER_SIZE);
    atomic_dec(&pr->swqe_avail);

    if (skb->len <= SWQE3_MAX_IMM) {
        u32 sig_iv = port->sig_comp_iv;
        u32 swqe_num = pr->swqe_id_counter;
        ehea_xmit3(skb, dev, swqe);
        swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
                    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
        if (pr->swqe_ll_count >= (sig_iv - 1)) {
            swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
                                          sig_iv);
            swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
            pr->swqe_ll_count = 0;
        } else
            pr->swqe_ll_count += 1;
    } else {
        swqe->wr_id =
            EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
          | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
          | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
          | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
        pr->sq_skba.arr[pr->sq_skba.index] = skb;

        pr->sq_skba.index++;
        pr->sq_skba.index &= (pr->sq_skba.len - 1);

        lkey = pr->send_mr.lkey;
        ehea_xmit2(skb, dev, swqe, lkey);
        swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
    }
    pr->swqe_id_counter += 1;

    if (port->vgrp && vlan_tx_tag_present(skb)) {
        swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
        swqe->vlan_tag = vlan_tx_tag_get(skb);
    }

    if (netif_msg_tx_queued(port)) {
        ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
        ehea_dump(swqe, 512, "swqe");
    }

    if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
        netif_stop_queue(dev);
        swqe->tx_control |= EHEA_SWQE_PURGE;
    }

    ehea_post_swqe(pr->qp, swqe);
    pr->tx_packets++;

    if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
        spin_lock_irqsave(&pr->netif_queue, flags);
        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
            pr->p_stats.queue_stopped++;
            netif_stop_queue(dev);
            pr->queue_stopped = 1;
        }
        spin_unlock_irqrestore(&pr->netif_queue, flags);
    }
    dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
    spin_unlock(&pr->xmit_lock);

    return NETDEV_TX_OK;
}
static void ehea_vlan_rx_register(struct net_device *dev,
                                  struct vlan_group *grp)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_adapter *adapter = port->adapter;
    struct hcp_ehea_port_cb1 *cb1;
    u64 hret;

    port->vgrp = grp;

    cb1 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb1) {
        ehea_error("no mem for cb1");
        goto out;
    }

    hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                   H_PORT_CB1, H_PORT_CB1_ALL, cb1);
    if (hret != H_SUCCESS)
        ehea_error("modify_ehea_port failed");

    free_page((unsigned long)cb1);
out:
    return;
}
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_adapter *adapter = port->adapter;
    struct hcp_ehea_port_cb1 *cb1;
    int index;
    u64 hret;

    cb1 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb1) {
        ehea_error("no mem for cb1");
        goto out;
    }

    hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
                                  H_PORT_CB1, H_PORT_CB1_ALL, cb1);
    if (hret != H_SUCCESS) {
        ehea_error("query_ehea_port failed");
        goto out;
    }

    index = (vid / 64);
    cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

    hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                   H_PORT_CB1, H_PORT_CB1_ALL, cb1);
    if (hret != H_SUCCESS)
        ehea_error("modify_ehea_port failed");
out:
    free_page((unsigned long)cb1);
    return;
}
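
/*
 * The VLAN filter in CB1 is a bitmap with one bit per VLAN ID, packed
 * into 64-bit words MSB-first. For example, vid = 100 gives
 * index = 100 / 64 = 1, and 0x8000000000000000 >> (100 & 0x3F) equals
 * 1ULL << 27, so bit 27 of vlan_filter[1] is set.
 * ehea_vlan_rx_kill_vid() below clears the same bit again.
 */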
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
    struct ehea_port *port = netdev_priv(dev);
    struct ehea_adapter *adapter = port->adapter;
    struct hcp_ehea_port_cb1 *cb1;
    int index;
    u64 hret;

    vlan_group_set_device(port->vgrp, vid, NULL);

    cb1 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb1) {
        ehea_error("no mem for cb1");
        goto out;
    }

    hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
                                  H_PORT_CB1, H_PORT_CB1_ALL, cb1);
    if (hret != H_SUCCESS) {
        ehea_error("query_ehea_port failed");
        goto out;
    }

    index = (vid / 64);
    cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

    hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                   H_PORT_CB1, H_PORT_CB1_ALL, cb1);
    if (hret != H_SUCCESS)
        ehea_error("modify_ehea_port failed");
out:
    free_page((unsigned long)cb1);
    return;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
    int ret = -EIO;
    u64 hret;
    u16 dummy16 = 0;
    u64 dummy64 = 0;
    struct hcp_modify_qp_cb0 *cb0;

    cb0 = (void *)get_zeroed_page(GFP_KERNEL);
    if (!cb0) {
        ret = -ENOMEM;
        goto out;
    }

    hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
    if (hret != H_SUCCESS) {
        ehea_error("query_ehea_qp failed (1)");
        goto out;
    }

    cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
    hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
                                 &dummy64, &dummy64, &dummy16, &dummy16);
    if (hret != H_SUCCESS) {
        ehea_error("modify_ehea_qp failed (1)");
        goto out;
    }

    hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
    if (hret != H_SUCCESS) {
        ehea_error("query_ehea_qp failed (2)");
        goto out;
    }

    cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
    hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
                                 &dummy64, &dummy64, &dummy16, &dummy16);
    if (hret != H_SUCCESS) {
        ehea_error("modify_ehea_qp failed (2)");
        goto out;
    }

    hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
    if (hret != H_SUCCESS) {
        ehea_error("query_ehea_qp failed (3)");
        goto out;
    }

    cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
    hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
                                 &dummy64, &dummy64, &dummy16, &dummy16);
    if (hret != H_SUCCESS) {
        ehea_error("modify_ehea_qp failed (3)");
        goto out;
    }

    hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
                                EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
    if (hret != H_SUCCESS) {
        ehea_error("query_ehea_qp failed (4)");
        goto out;
    }

    ret = 0;
out:
    free_page((unsigned long)cb0);
    return ret;
}
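
/*
 * ehea_activate_qp() walks the firmware QP through its state machine
 * one transition at a time: INITIALIZED, then ENABLED, then
 * ready-to-send (RDY2SND). Each modify call is preceded by a fresh
 * query so the control block handed back to the hypervisor reflects
 * the QP's current state before the next transition is requested.
 */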
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}
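/*
 * The first def_qps port resources use the full pr_cfg, with receive
 * and send queues sized from the module parameters; the add_tx_qps
 * extra resources use pr_cfg_small_rx, which keeps the send queue but
 * shrinks every receive queue to a single entry since those QPs only
 * transmit.
 */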
static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
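/*
 * The adapter-wide memory region is shared by all ports:
 * ehea_add_adapter_mr() registers it only when the first port is being
 * set up, and ehea_remove_adapter_mr() frees it only once active_ports
 * has dropped to zero.
 */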
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}
static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_start_queue(dev);
	}

	mutex_unlock(&port->port_lock);

	return ret;
}
static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		ehea_info("Failed freeing resources for %s. ret=%i",
			  dev->name, ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}
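/*
 * __EHEA_DISABLE_PORT_RESET stays set across the whole shutdown so that
 * no new reset work is queued against a dying port (the flag is checked
 * on the scheduling side), while cancel_work_sync() reaps any reset
 * that is already running.
 */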
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int k = 0;
		while (atomic_read(&pr->swqe_avail) < swqe_max) {
			msleep(5);
			if (++k == 20)
				break;
		}
	}
}
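/*
 * Purge-and-flush is the quiesce step used before LPAR memory is
 * remapped: ehea_purge_sq() marks every send WQE with EHEA_SWQE_PURGE
 * so the hardware completes them without putting them on the wire, and
 * ehea_flush_sq() then polls swqe_avail with a bounded msleep() retry
 * loop until the send queue has drained or the wait times out.
 */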
int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			ehea_error("unreg shared memory region failed");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}
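/*
 * After the kernel memory region is re-registered (DLPAR memory
 * add/remove), the lkey and virtual addresses cached in still-posted
 * RQ2/RQ3 WQEs are stale; ehea_update_rqs() rewrites them from the new
 * recv_mr and the saved skb arrays before ehea_restart_qps() re-enables
 * the queue pair.
 */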
int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			ehea_error("creation of shared memory regions failed");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	port_napi_enable(port);

	netif_wake_queue(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}
static void ehea_rereg_mrs(struct work_struct *work)
{
	int ret, i;
	struct ehea_adapter *adapter;

	ehea_info("LPAR memory changed - re-initializing driver");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_stop_queue(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				ehea_error("unregister MR failed - driver"
					   " inoperable!");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				ehea_error("register MR failed - driver"
					   " inoperable!");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						port_napi_enable(port);
						ret = ehea_restart_qps(dev);
						if (!ret)
							netif_wake_queue(dev);
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	ehea_info("re-initializing driver complete");
out:
	return;
}
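/*
 * Overall DLPAR sequence: quiesce queues and disable the QPs on every
 * active port, drop each adapter's memory region while __EHEA_STOP_XFER
 * blocks transmits, then register fresh MRs and restart the QPs. A
 * failure in either pass leaves the driver stopped, hence the
 * "driver inoperable" messages above.
 */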
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
	} else {
		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4,
					      H_PORT_CB4_JUMBO, cb4);
		if (hret == H_SUCCESS) {
			if (cb4->jumbo_frame)
				*jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(
						port->adapter->handle,
						port->logical_port_id,
						H_PORT_CB4,
						H_PORT_CB4_JUMBO, cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
		} else
			ret = -EINVAL;

		free_page((unsigned long)cb4);
	}
	return ret;
}
static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		ehea_error("failed to register device. ret=%d", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		ehea_error("failed to register attributes, ret=%d", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}
static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats		= ehea_get_stats,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_register	= ehea_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));

	if (!dev) {
		ehea_error("no mem for net_device");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		ehea_error("failed determining jumbo frame status for %s",
			   port->netdev->name);

	ehea_info("%s: Jumbo frames are %sabled", dev->name,
		  jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	ehea_error("setting up logical port with id=%d failed, ret=%d",
		   logical_port_id, ret);
	return NULL;
}
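/*
 * sig_comp_iv = sq_entries / 10 above sets the signaling interval: a
 * send completion event is requested roughly every tenth WQE instead of
 * per packet, batching TX completion interrupts.
 */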
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			ehea_error("bad device node: eth_dn name=%s",
				   eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			ehea_error("creating MR failed");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			ehea_info("%s -> logical port id #%d",
				  adapter->port[i]->netdev->name,
				  *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("adding port with logical port id=%d failed. port "
			  "already configured as %s.", logical_port_id,
			  port->netdev->name);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		ehea_info("no logical port with id %d found", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		ehea_error("creating MR failed");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		ehea_info("added %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("removed %s (logical port id=%d)",
			  port->netdev->name, logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		ehea_error("removing port with logical port id=%d failed. port "
			   "not configured.", logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
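/*
 * probe_port/remove_port let userspace hot-add or hot-remove logical
 * ports by writing a logical port id to the adapter's sysfs node,
 * roughly (path illustrative):
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 */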
int ehea_create_device_sysfs(struct of_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct of_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}
static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		ehea_error("Invalid ibmebus device probed");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);


	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();

	return ret;
}
static int __devexit ehea_remove(struct of_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	flush_scheduled_work();

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}
void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}
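/*
 * ehea_crash_handler() runs at crash/kexec shutdown (registered via
 * crash_shutdown_register() in ehea_module_init() below) and
 * force-frees all firmware handles and broadcast/multicast
 * registrations so a subsequent kernel can reclaim the hardware.
 */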
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		ehea_info("memory offlining canceled");
		/* Re-add canceled memory block */
	case MEM_ONLINE:
		ehea_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	case MEM_GOING_OFFLINE:
		ehea_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};
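/*
 * Note the deliberate fall-through above: a canceled offline is treated
 * exactly like newly onlined memory, i.e. the section is re-added to
 * the busmap and all memory regions are re-registered via
 * ehea_rereg_mrs().
 */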
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		ehea_info("Reboot: freeing all eHEA resources");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}
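/*
 * Only the min/max bounds are enforced here; the documented queue sizes
 * are of the form 2^x - 1 (see the MODULE_PARM_DESC strings above).
 * An illustrative load line:
 *
 *	modprobe ehea rq1_entries=16383 sq_entries=4095 use_mcs=1
 */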
static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);
int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);


	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		ehea_info("failed registering reboot notifier");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		ehea_info("failed registering memory remove notifier");

	ret = crash_shutdown_register(&ehea_crash_handler);
	if (ret)
		ehea_info("failed registering crash handler");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		ehea_error("failed registering eHEA device driver on ebus");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		ehea_error("failed to register capabilities attribute, ret=%d",
			   ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(&ehea_crash_handler);
out:
	return ret;
}
static void __exit ehea_module_exit(void)
{
	int ret;

	flush_scheduled_work();
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(&ehea_crash_handler);
	if (ret)
		ehea_info("failed unregistering crash handler");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);