/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
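
/*
 * mlx4_en_setup_tc() partitions the Tx queues evenly among the 'up'
 * user priorities: traffic class i is given the num_tx_rings_p_up
 * consecutive queues starting at i * num_tx_rings_p_up.  For example,
 * with num_tx_rings_p_up == 8 and up == 4, TC 0 maps to queues 0-7
 * and TC 3 maps to queues 24-31.
 */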
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int i;
        unsigned int offset = 0;

        if (up && up != MLX4_EN_NUM_UP)
                return -EINVAL;

        netdev_set_num_tc(dev, up);

        /* Partition Tx queues evenly amongst UP's */
        for (i = 0; i < up; i++) {
                netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
                offset += priv->num_tx_rings_p_up;
        }

        return 0;
}
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
        struct list_head next;
        struct work_struct work;

        __be32 src_ip;
        __be32 dst_ip;
        __be16 src_port;
        __be16 dst_port;

        int rxq_index;
        struct mlx4_en_priv *priv;
        u32 flow_id;                    /* RFS infrastructure id */
        int id;                         /* mlx4_en driver id */
        u64 reg_id;                     /* Flow steering API id */
        u8 activated;                   /* Used to prevent expiry before filter
                                         * is attached
                                         */
        struct hlist_node filter_chain;
};
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
static void mlx4_en_filter_work(struct work_struct *work)
{
        struct mlx4_en_filter *filter = container_of(work,
                                                     struct mlx4_en_filter,
                                                     work);
        struct mlx4_en_priv *priv = filter->priv;
        struct mlx4_spec_list spec_tcp = {
                .id = MLX4_NET_TRANS_RULE_ID_TCP,
                {
                        .tcp_udp = {
                                .dst_port = filter->dst_port,
                                .dst_port_msk = (__force __be16)-1,
                                .src_port = filter->src_port,
                                .src_port_msk = (__force __be16)-1,
                        },
                },
        };
        struct mlx4_spec_list spec_ip = {
                .id = MLX4_NET_TRANS_RULE_ID_IPV4,
                {
                        .ipv4 = {
                                .dst_ip = filter->dst_ip,
                                .dst_ip_msk = (__force __be32)-1,
                                .src_ip = filter->src_ip,
                                .src_ip_msk = (__force __be32)-1,
                        },
                },
        };
        struct mlx4_spec_list spec_eth = {
                .id = MLX4_NET_TRANS_RULE_ID_ETH,
        };
        struct mlx4_net_trans_rule rule = {
                .list = LIST_HEAD_INIT(rule.list),
                .queue_mode = MLX4_NET_TRANS_Q_LIFO,
                .exclusive = 1,
                .allow_loopback = 1,
                .promisc_mode = MLX4_FS_PROMISC_NONE,
                .port = priv->port,
                .priority = MLX4_DOMAIN_RFS,
        };
        int rc;
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

        list_add_tail(&spec_eth.list, &rule.list);
        list_add_tail(&spec_ip.list, &rule.list);
        list_add_tail(&spec_tcp.list, &rule.list);

        rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
        memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
        memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

        filter->activated = 0;

        if (filter->reg_id) {
                rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
                if (rc && rc != -ENOENT)
                        en_err(priv, "Error detaching flow. rc = %d\n", rc);
        }

        rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
        if (rc)
                en_err(priv, "Error attaching flow. err = %d\n", rc);

        mlx4_en_filter_rfs_expire(priv);

        filter->activated = 1;
}
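
/*
 * RFS filters live on two structures at once: priv->filters, a list in
 * insertion order that the expiry scan walks, and priv->filter_hash, a
 * hash table keyed on the TCP/IPv4 4-tuple for fast lookup.  The bucket
 * index computed below is only a hint; collisions are chained on
 * filter_chain and resolved by the exact 4-tuple compare in
 * mlx4_en_filter_find().
 */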
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                   __be16 src_port, __be16 dst_port)
{
        unsigned long l;
        int bucket_idx;

        l = (__force unsigned long)src_port |
            ((__force unsigned long)dst_port << 2);
        l ^= (__force unsigned long)(src_ip ^ dst_ip);

        bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

        return &priv->filter_hash[bucket_idx];
}
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
                     __be32 dst_ip, __be16 src_port, __be16 dst_port,
                     u32 flow_id)
{
        struct mlx4_en_filter *filter = NULL;

        filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
        if (!filter)
                return NULL;

        filter->priv = priv;
        filter->rxq_index = rxq_index;
        INIT_WORK(&filter->work, mlx4_en_filter_work);

        filter->src_ip = src_ip;
        filter->dst_ip = dst_ip;
        filter->src_port = src_port;
        filter->dst_port = dst_port;

        filter->flow_id = flow_id;

        filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

        list_add_tail(&filter->next, &priv->filters);
        hlist_add_head(&filter->filter_chain,
                       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
                                          dst_port));

        return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
        struct mlx4_en_priv *priv = filter->priv;
        int rc;

        list_del(&filter->next);

        rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
        if (rc && rc != -ENOENT)
                en_err(priv, "Error detaching flow. rc = %d\n", rc);

        kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                    __be16 src_port, __be16 dst_port)
{
        struct mlx4_en_filter *filter;
        struct mlx4_en_filter *ret = NULL;

        hlist_for_each_entry(filter,
                             filter_hash_bucket(priv, src_ip, dst_ip,
                                                src_port, dst_port),
                             filter_chain) {
                if (filter->src_ip == src_ip &&
                    filter->dst_ip == dst_ip &&
                    filter->src_port == src_port &&
                    filter->dst_port == dst_port) {
                        ret = filter;
                        break;
                }
        }

        return ret;
}
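
/*
 * ndo_rx_flow_steer() callback.  The stack calls this from the Rx path
 * with the queue it would like a flow steered to; the id returned here
 * is later handed back to rps_may_expire_flow() (see
 * mlx4_en_filter_rfs_expire()).  Only non-fragmented TCP/IPv4 flows are
 * steered.  The hardware attach itself is deferred to filter->work,
 * since the flow steering firmware commands may sleep while this
 * callback runs in atomic context.
 */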
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct mlx4_en_priv *priv = netdev_priv(net_dev);
        struct mlx4_en_filter *filter;
        const struct iphdr *ip;
        const __be16 *ports;
        __be32 src_ip;
        __be32 dst_ip;
        __be16 src_port;
        __be16 dst_port;
        int nhoff = skb_network_offset(skb);
        int ret = 0;

        if (skb->protocol != htons(ETH_P_IP))
                return -EPROTONOSUPPORT;

        ip = (const struct iphdr *)(skb->data + nhoff);
        if (ip_is_fragment(ip))
                return -EPROTONOSUPPORT;

        ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

        src_ip = ip->saddr;
        dst_ip = ip->daddr;
        src_port = ports[0];
        dst_port = ports[1];

        if (ip->protocol != IPPROTO_TCP)
                return -EPROTONOSUPPORT;

        spin_lock_bh(&priv->filters_lock);
        filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
        if (filter) {
                if (filter->rxq_index == rxq_index)
                        goto out;

                filter->rxq_index = rxq_index;
        } else {
                filter = mlx4_en_filter_alloc(priv, rxq_index,
                                              src_ip, dst_ip,
                                              src_port, dst_port, flow_id);
                if (!filter) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        queue_work(priv->mdev->workqueue, &filter->work);

out:
        ret = filter->id;
err:
        spin_unlock_bh(&priv->filters_lock);

        return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring *rx_ring)
{
        struct mlx4_en_filter *filter, *tmp;
        LIST_HEAD(del_list);

        spin_lock_bh(&priv->filters_lock);
        list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
                list_move(&filter->next, &del_list);
                hlist_del(&filter->filter_chain);
        }
        spin_unlock_bh(&priv->filters_lock);

        list_for_each_entry_safe(filter, tmp, &del_list, next) {
                cancel_work_sync(&filter->work);
                mlx4_en_filter_free(filter);
        }
}
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
        struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
        LIST_HEAD(del_list);
        int i = 0;

        spin_lock_bh(&priv->filters_lock);
        list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
                if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
                        break;

                if (filter->activated &&
                    !work_pending(&filter->work) &&
                    rps_may_expire_flow(priv->dev,
                                        filter->rxq_index, filter->flow_id,
                                        filter->id)) {
                        list_move(&filter->next, &del_list);
                        hlist_del(&filter->filter_chain);
                } else
                        last_filter = filter;

                i++;
        }

        if (last_filter && (&last_filter->next != priv->filters.next))
                list_move(&priv->filters, &last_filter->next);

        spin_unlock_bh(&priv->filters_lock);

        list_for_each_entry_safe(filter, tmp, &del_list, next)
                mlx4_en_filter_free(filter);
}
#endif
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
        int idx;

        en_dbg(HW, priv, "adding VLAN:%d\n", vid);

        set_bit(vid, priv->active_vlans);

        /* Add VID to port VLAN filter */
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
                en_err(priv, "failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);

        return 0;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
        int idx;

        en_dbg(HW, priv, "Killing VID:%d\n", vid);

        clear_bit(vid, priv->active_vlans);

        /* Remove VID from port VLAN filter */
        mutex_lock(&mdev->state_lock);
        if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
                mlx4_unregister_vlan(mdev->dev, priv->port, idx);
        else
                en_err(priv, "could not find vid %d in cache\n", vid);

        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);

        return 0;
}
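
/*
 * The firmware command interface carries MAC addresses as a u64 with
 * the first byte of the address in the most significant bits, e.g.
 * 00:11:22:33:44:55 becomes 0x0000001122334455.  The helpers below
 * convert between that format and the byte array used by the stack.
 */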
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
        int i;

        for (i = ETH_ALEN - 1; i >= 0; --i) {
                dst_mac[i] = src_mac & 0xff;
                src_mac >>= 8;
        }
        memset(&dst_mac[ETH_ALEN], 0, 2);
}
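
/*
 * For B0 steering the unicast MAC is packed into a 16-byte gid: the
 * address occupies bytes 10..15 and byte 5 carries the port number
 * (the same layout the multicast code uses for mc_list).  In
 * device-managed steering mode an mlx4_net_trans_rule with an ETH spec
 * matching on the destination MAC is attached instead.
 */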
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
                                unsigned char *mac, int *qpn, u64 *reg_id)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int err;

        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_B0: {
                struct mlx4_qp qp;
                u8 gid[16] = {0};

                qp.qpn = *qpn;
                memcpy(&gid[10], mac, ETH_ALEN);
                gid[5] = priv->port;

                err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
                break;
        }
        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
                struct mlx4_spec_list spec_eth = { {NULL} };
                __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

                struct mlx4_net_trans_rule rule = {
                        .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                        .exclusive = 0,
                        .allow_loopback = 1,
                        .promisc_mode = MLX4_FS_PROMISC_NONE,
                        .priority = MLX4_DOMAIN_NIC,
                };

                rule.port = priv->port;
                rule.qpn = *qpn;
                INIT_LIST_HEAD(&rule.list);

                spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
                memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
                memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
                list_add_tail(&spec_eth.list, &rule.list);

                err = mlx4_flow_attach(dev, &rule, reg_id);
                break;
        }
        default:
                return -EINVAL;
        }
        if (err)
                en_warn(priv, "Failed Attaching Unicast\n");

        return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
                                     unsigned char *mac, int qpn, u64 reg_id)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;

        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_B0: {
                struct mlx4_qp qp;
                u8 gid[16] = {0};

                qp.qpn = qpn;
                memcpy(&gid[10], mac, ETH_ALEN);
                gid[5] = priv->port;

                mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
                break;
        }
        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
                mlx4_flow_detach(dev, reg_id);
                break;
        }
        default:
                en_err(priv, "Invalid steering mode.\n");
        }
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_mac_entry *entry;
        int index = 0;
        int err = 0;
        u64 reg_id;
        int *qpn = &priv->base_qpn;
        u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

        en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
               priv->dev->dev_addr);
        index = mlx4_register_mac(dev, priv->port, mac);
        if (index < 0) {
                err = index;
                en_err(priv, "Failed adding MAC: %pM\n",
                       priv->dev->dev_addr);
                return err;
        }

        if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
                int base_qpn = mlx4_get_base_qpn(dev, priv->port);
                *qpn = base_qpn + index;
                return 0;
        }

        err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
        en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
        if (err) {
                en_err(priv, "Failed to reserve qp for mac registration\n");
                goto qp_err;
        }

        err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
        if (err)
                goto steer_err;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                err = -ENOMEM;
                goto alloc_err;
        }
        memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
        entry->reg_id = reg_id;

        hlist_add_head_rcu(&entry->hlist,
                           &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

        return 0;

alloc_err:
        mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
        mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
        mlx4_unregister_mac(dev, priv->port, mac);
        return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int qpn = priv->base_qpn;
        u64 mac;

        if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
                mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                       priv->dev->dev_addr);
                mlx4_unregister_mac(dev, priv->port, mac);
        } else {
                struct mlx4_mac_entry *entry;
                struct hlist_node *tmp;
                struct hlist_head *bucket;
                unsigned int i;

                for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                        bucket = &priv->mac_hash[i];
                        hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                                mac = mlx4_en_mac_to_u64(entry->mac);
                                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                                       entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         qpn, entry->reg_id);

                                mlx4_unregister_mac(dev, priv->port, mac);
                                hlist_del_rcu(&entry->hlist);
                                kfree_rcu(entry, rcu);
                        }
                }

                en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
                       priv->port, qpn);
                mlx4_qp_release_range(dev, qpn, 1);
                priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
        }
}
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
                               unsigned char *new_mac, unsigned char *prev_mac)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int err = 0;
        u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

        if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
                struct hlist_head *bucket;
                unsigned int mac_hash;
                struct mlx4_mac_entry *entry;
                struct hlist_node *tmp;
                u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

                bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         qpn, entry->reg_id);
                                mlx4_unregister_mac(dev, priv->port,
                                                    prev_mac_u64);
                                hlist_del_rcu(&entry->hlist);
                                synchronize_rcu();
                                memcpy(entry->mac, new_mac, ETH_ALEN);
                                entry->reg_id = 0;
                                mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
                                hlist_add_head_rcu(&entry->hlist,
                                                   &priv->mac_hash[mac_hash]);
                                mlx4_register_mac(dev, priv->port, new_mac_u64);
                                err = mlx4_en_uc_steer_add(priv, new_mac,
                                                           &qpn,
                                                           &entry->reg_id);
                                return err;
                        }
                }
                return -EINVAL;
        }

        return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
u64 mlx4_en_mac_to_u64(u8 *addr)
{
        u64 mac = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                mac <<= 8;
                mac |= addr[i];
        }
        return mac;
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
        int err = 0;

        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_en_replace_mac(priv, priv->base_qpn,
                                          priv->dev->dev_addr, priv->prev_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
                memcpy(priv->prev_mac, priv->dev->dev_addr,
                       sizeof(priv->prev_mac));
        } else
                en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

        return err;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct sockaddr *saddr = addr;
        int err;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);

        mutex_lock(&mdev->state_lock);
        err = mlx4_en_do_set_mac(priv);
        mutex_unlock(&mdev->state_lock);

        return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_mc_list *tmp, *mc_to_del;

        list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
                list_del(&mc_to_del->list);
                kfree(mc_to_del);
        }
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        struct mlx4_en_mc_list *tmp;

        mlx4_en_clear_list(dev);
        netdev_for_each_mc_addr(ha, dev) {
                tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
                if (!tmp) {
                        mlx4_en_clear_list(dev);
                        return;
                }
                memcpy(tmp->addr, ha->addr, ETH_ALEN);
                list_add_tail(&tmp->list, &priv->mc_list);
        }
}
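
/*
 * update_mclist_flags() computes a diff between the cached curr_list
 * (dst) and the freshly snapshotted mc_list (src): entries only in dst
 * are marked MCLIST_REM, entries only in src are duplicated into dst
 * and marked MCLIST_ADD, and entries present in both are marked
 * MCLIST_NONE.  mlx4_en_do_multicast() then walks dst and
 * attaches/detaches accordingly.
 */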
static void update_mclist_flags(struct mlx4_en_priv *priv,
                                struct list_head *dst,
                                struct list_head *src)
{
        struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
        bool found;

        /* Find all the entries that should be removed from dst,
         * These are the entries that are not found in src
         */
        list_for_each_entry(dst_tmp, dst, list) {
                found = false;
                list_for_each_entry(src_tmp, src, list) {
                        if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
                                found = true;
                                break;
                        }
                }
                if (!found)
                        dst_tmp->action = MCLIST_REM;
        }

        /* Add entries that exist in src but not in dst
         * mark them as need to add
         */
        list_for_each_entry(src_tmp, src, list) {
                found = false;
                list_for_each_entry(dst_tmp, dst, list) {
                        if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
                                dst_tmp->action = MCLIST_NONE;
                                found = true;
                                break;
                        }
                }
                if (!found) {
                        new_mc = kmemdup(src_tmp,
                                         sizeof(struct mlx4_en_mc_list),
                                         GFP_KERNEL);
                        if (!new_mc)
                                return;

                        new_mc->action = MCLIST_ADD;
                        list_add_tail(&new_mc->list, dst);
                }
        }
}
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (!priv->port_up)
                return;

        queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
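
/*
 * Promiscuous state is tracked in priv->flags: MLX4_EN_FLAG_PROMISC
 * mirrors IFF_PROMISC, MLX4_EN_FLAG_MC_PROMISC tracks the multicast
 * promisc rule (which device-managed steering enables together with
 * uplink promisc), and MLX4_EN_FLAG_FORCE_PROMISC is set by
 * mlx4_en_do_uc_filter() when it fails to program a unicast filter and
 * promiscuous mode must be forced on regardless of IFF_PROMISC.
 */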
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
                                     struct mlx4_en_dev *mdev)
{
        int err = 0;

        if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
                if (netif_msg_rx_status(priv))
                        en_warn(priv, "Entering promiscuous mode\n");
                priv->flags |= MLX4_EN_FLAG_PROMISC;

                /* Enable promiscuous mode */
                switch (mdev->dev->caps.steering_mode) {
                case MLX4_STEERING_MODE_DEVICE_MANAGED:
                        err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                          priv->port,
                                                          priv->base_qpn,
                                                          MLX4_FS_PROMISC_UPLINK);
                        if (err)
                                en_err(priv, "Failed enabling promiscuous mode\n");
                        priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        break;

                case MLX4_STEERING_MODE_B0:
                        err = mlx4_unicast_promisc_add(mdev->dev,
                                                       priv->base_qpn,
                                                       priv->port);
                        if (err)
                                en_err(priv, "Failed enabling unicast promiscuous mode\n");

                        /* Add the default qp number as multicast
                         * promisc
                         */
                        if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
                                err = mlx4_multicast_promisc_add(mdev->dev,
                                                                 priv->base_qpn,
                                                                 priv->port);
                                if (err)
                                        en_err(priv, "Failed enabling multicast promiscuous mode\n");
                                priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        }
                        break;

                case MLX4_STEERING_MODE_A0:
                        err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                                     priv->port,
                                                     priv->base_qpn,
                                                     1);
                        if (err)
                                en_err(priv, "Failed enabling promiscuous mode\n");
                        break;
                }

                /* Disable port multicast filter (unconditionally) */
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Disable port VLAN filter */
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed disabling VLAN filter\n");
        }
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
                                       struct mlx4_en_dev *mdev)
{
        int err = 0;

        if (netif_msg_rx_status(priv))
                en_warn(priv, "Leaving promiscuous mode\n");
        priv->flags &= ~MLX4_EN_FLAG_PROMISC;

        /* Disable promiscuous mode */
        switch (mdev->dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                     priv->port,
                                                     MLX4_FS_PROMISC_UPLINK);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");
                priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                break;

        case MLX4_STEERING_MODE_B0:
                err = mlx4_unicast_promisc_remove(mdev->dev,
                                                  priv->base_qpn,
                                                  priv->port);
                if (err)
                        en_err(priv, "Failed disabling unicast promiscuous mode\n");
                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        err = mlx4_multicast_promisc_remove(mdev->dev,
                                                            priv->base_qpn,
                                                            priv->port);
                        if (err)
                                en_err(priv, "Failed disabling multicast promiscuous mode\n");
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }
                break;

        case MLX4_STEERING_MODE_A0:
                err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                             priv->port,
                                             priv->base_qpn, 0);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");
                break;
        }

        /* Enable port VLAN filter */
        err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
        if (err)
                en_err(priv, "Failed enabling VLAN filter\n");
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
                                 struct net_device *dev,
                                 struct mlx4_en_dev *mdev)
{
        struct mlx4_en_mc_list *mclist, *tmp;
        u64 mcast_addr = 0;
        u8 mc_list[16] = {0};
        int err = 0;

        /* Enable/disable the multicast filter according to IFF_ALLMULTI */
        if (dev->flags & IFF_ALLMULTI) {
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Add the default qp number as multicast promisc */
                if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
                        switch (mdev->dev->caps.steering_mode) {
                        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                                err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                                  priv->port,
                                                                  priv->base_qpn,
                                                                  MLX4_FS_PROMISC_ALL_MULTI);
                                break;

                        case MLX4_STEERING_MODE_B0:
                                err = mlx4_multicast_promisc_add(mdev->dev,
                                                                 priv->base_qpn,
                                                                 priv->port);
                                break;

                        case MLX4_STEERING_MODE_A0:
                                break;
                        }
                        if (err)
                                en_err(priv, "Failed entering multicast promisc mode\n");
                        priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                }
        } else {
                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        switch (mdev->dev->caps.steering_mode) {
                        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                                     priv->port,
                                                                     MLX4_FS_PROMISC_ALL_MULTI);
                                break;

                        case MLX4_STEERING_MODE_B0:
                                err = mlx4_multicast_promisc_remove(mdev->dev,
                                                                    priv->base_qpn,
                                                                    priv->port);
                                break;

                        case MLX4_STEERING_MODE_A0:
                                break;
                        }
                        if (err)
                                en_err(priv, "Failed disabling multicast promiscuous mode\n");
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }

                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Flush mcast filter and init it with broadcast address */
                mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
                                    1, MLX4_MCAST_CONFIG);

                /* Update multicast list - we cache all addresses so they won't
                 * change while HW is updated holding the command semaphore */
                netif_addr_lock_bh(dev);
                mlx4_en_cache_mclist(dev);
                netif_addr_unlock_bh(dev);
                list_for_each_entry(mclist, &priv->mc_list, list) {
                        mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
                        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                            mcast_addr, 0, MLX4_MCAST_CONFIG);
                }
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_ENABLE);
                if (err)
                        en_err(priv, "Failed enabling multicast filter\n");

                update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
                list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
                        if (mclist->action == MCLIST_REM) {
                                /* detach this address and delete from list */
                                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                                mc_list[5] = priv->port;
                                err = mlx4_multicast_detach(mdev->dev,
                                                            &priv->rss_map.indir_qp,
                                                            mc_list,
                                                            MLX4_PROT_ETH,
                                                            mclist->reg_id);
                                if (err)
                                        en_err(priv, "Fail to detach multicast address\n");

                                /* remove from list */
                                list_del(&mclist->list);
                                kfree(mclist);
                        } else if (mclist->action == MCLIST_ADD) {
                                /* attach the address */
                                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                                /* needed for B0 steering support */
                                mc_list[5] = priv->port;
                                err = mlx4_multicast_attach(mdev->dev,
                                                            &priv->rss_map.indir_qp,
                                                            mc_list,
                                                            priv->port, 0,
                                                            MLX4_PROT_ETH,
                                                            &mclist->reg_id);
                                if (err)
                                        en_err(priv, "Fail to attach multicast address\n");
                        }
                }
        }
}
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                                 struct net_device *dev,
                                 struct mlx4_en_dev *mdev)
{
        struct netdev_hw_addr *ha;
        struct mlx4_mac_entry *entry;
        struct hlist_node *tmp;
        bool found;
        u64 mac;
        int err = 0;
        struct hlist_head *bucket;
        unsigned int i;
        int removed = 0;
        u32 prev_flags;

        /* Note that we do not need to protect our mac_hash traversal with rcu,
         * since all modification code is protected by mdev->state_lock
         */

        /* find what to remove */
        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                bucket = &priv->mac_hash[i];
                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        found = false;
                        netdev_for_each_uc_addr(ha, dev) {
                                if (ether_addr_equal_64bits(entry->mac,
                                                            ha->addr)) {
                                        found = true;
                                        break;
                                }
                        }

                        /* MAC address of the port is not in uc list */
                        if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
                                found = true;

                        if (!found) {
                                mac = mlx4_en_mac_to_u64(entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         priv->base_qpn,
                                                         entry->reg_id);
                                mlx4_unregister_mac(mdev->dev, priv->port, mac);

                                hlist_del_rcu(&entry->hlist);
                                kfree_rcu(entry, rcu);
                                en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
                                       entry->mac, priv->port);
                                ++removed;
                        }
                }
        }

        /* if we didn't remove anything, there is no use in trying to add
         * again once we are in a forced promisc mode state
         */
        if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
                return;

        prev_flags = priv->flags;
        priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

        /* find what to add */
        netdev_for_each_uc_addr(ha, dev) {
                found = false;
                bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
                hlist_for_each_entry(entry, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry) {
                                en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
                                       ha->addr, priv->port);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        }
                        mac = mlx4_en_mac_to_u64(ha->addr);
                        memcpy(entry->mac, ha->addr, ETH_ALEN);
                        err = mlx4_register_mac(mdev->dev, priv->port, mac);
                        if (err < 0) {
                                en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
                                       ha->addr, priv->port, err);
                                kfree(entry);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        }
                        err = mlx4_en_uc_steer_add(priv, ha->addr,
                                                   &priv->base_qpn,
                                                   &entry->reg_id);
                        if (err) {
                                en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
                                       ha->addr, priv->port, err);
                                mlx4_unregister_mac(mdev->dev, priv->port, mac);
                                kfree(entry);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        } else {
                                unsigned int mac_hash;
                                en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
                                       ha->addr, priv->port);
                                mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
                                bucket = &priv->mac_hash[mac_hash];
                                hlist_add_head_rcu(&entry->hlist, bucket);
                        }
                }
        }

        if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
                en_warn(priv, "Forcing promiscuous mode on port:%d\n",
                        priv->port);
        } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
                en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
                        priv->port);
        }
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 rx_mode_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        mutex_lock(&mdev->state_lock);
        if (!mdev->device_up) {
                en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
                goto out;
        }
        if (!priv->port_up) {
                en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
                goto out;
        }

        if (!netif_carrier_ok(dev)) {
                if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
                        if (priv->port_state.link_state) {
                                priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
                                netif_carrier_on(dev);
                                en_dbg(LINK, priv, "Link Up\n");
                        }
                }
        }

        if (dev->priv_flags & IFF_UNICAST_FLT)
                mlx4_en_do_uc_filter(priv, dev, mdev);

        /* Promiscuous mode: disable all filters */
        if ((dev->flags & IFF_PROMISC) ||
            (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
                mlx4_en_set_promisc_mode(priv, mdev);
                goto out;
        }

        /* Not in promiscuous mode */
        if (priv->flags & MLX4_EN_FLAG_PROMISC)
                mlx4_en_clear_promisc_mode(priv, mdev);

        mlx4_en_do_multicast(priv, dev, mdev);
out:
        mutex_unlock(&mdev->state_lock);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_cq *cq;
        unsigned long flags;
        int i;

        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];
                spin_lock_irqsave(&cq->lock, flags);
                napi_synchronize(&cq->napi);
                mlx4_en_process_rx_cq(dev, cq, 0);
                spin_unlock_irqrestore(&cq->lock, flags);
        }
}
#endif
static void mlx4_en_tx_timeout(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        if (netif_msg_timer(priv))
                en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

        priv->port_stats.tx_timeout++;
        en_dbg(DRV, priv, "Scheduling watchdog\n");
        queue_work(mdev->workqueue, &priv->watchdog_task);
}
static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        spin_lock_bh(&priv->stats_lock);
        memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
        spin_unlock_bh(&priv->stats_lock);

        return &priv->ret_stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
        struct mlx4_en_cq *cq;
        int i;

        /* If we haven't received a specific coalescing setting
         * (module param), we set the moderation parameters as follows:
         * - moder_cnt is set to the number of mtu sized packets to
         *   satisfy our coalescing target.
         * - moder_time is set to a fixed value.
         */
        priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
        priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
        priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
        priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
        en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
               priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

        /* Setup cq moderation params */
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];
                cq->moder_cnt = priv->rx_frames;
                cq->moder_time = priv->rx_usecs;
                priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
                priv->last_moder_packets[i] = 0;
                priv->last_moder_bytes[i] = 0;
        }

        for (i = 0; i < priv->tx_ring_num; i++) {
                cq = &priv->tx_cq[i];
                cq->moder_cnt = priv->tx_frames;
                cq->moder_time = priv->tx_usecs;
        }

        /* Reset auto-moderation params */
        priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
        priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
        priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
        priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
        priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
        priv->adaptive_rx_coal = 1;
        priv->last_moder_jiffies = 0;
        priv->last_moder_tx_packets = 0;
}
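
/*
 * Adaptive Rx coalescing: once per sample interval, the per-ring packet
 * rate is measured and the CQ moderation time is interpolated linearly
 * between the configured bounds, roughly:
 *
 *      moder_time = rx_usecs_low + (rate - pkt_rate_low) *
 *              (rx_usecs_high - rx_usecs_low) /
 *              (pkt_rate_high - pkt_rate_low)
 *
 * clamped to rx_usecs_low/rx_usecs_high outside the rate window, and
 * dropped to rx_usecs_low for low rates or small average packets.
 */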
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
        unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
        struct mlx4_en_cq *cq;
        unsigned long packets;
        unsigned long rate;
        unsigned long avg_pkt_size;
        unsigned long rx_packets;
        unsigned long rx_bytes;
        unsigned long rx_pkt_diff;
        int moder_time;
        int ring, err;

        if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
                return;

        for (ring = 0; ring < priv->rx_ring_num; ring++) {
                spin_lock_bh(&priv->stats_lock);
                rx_packets = priv->rx_ring[ring].packets;
                rx_bytes = priv->rx_ring[ring].bytes;
                spin_unlock_bh(&priv->stats_lock);

                rx_pkt_diff = ((unsigned long) (rx_packets -
                                priv->last_moder_packets[ring]));
                packets = rx_pkt_diff;
                rate = packets * HZ / period;
                avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
                                priv->last_moder_bytes[ring])) / packets : 0;

                /* Apply auto-moderation only when packet rate
                 * exceeds a rate that matters */
                if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
                    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
                        if (rate < priv->pkt_rate_low)
                                moder_time = priv->rx_usecs_low;
                        else if (rate > priv->pkt_rate_high)
                                moder_time = priv->rx_usecs_high;
                        else
                                moder_time = (rate - priv->pkt_rate_low) *
                                        (priv->rx_usecs_high - priv->rx_usecs_low) /
                                        (priv->pkt_rate_high - priv->pkt_rate_low) +
                                        priv->rx_usecs_low;
                } else {
                        moder_time = priv->rx_usecs_low;
                }

                if (moder_time != priv->last_moder_time[ring]) {
                        priv->last_moder_time[ring] = moder_time;
                        cq = &priv->rx_cq[ring];
                        cq->moder_time = moder_time;
                        err = mlx4_en_set_cq_moder(priv, cq);
                        if (err)
                                en_err(priv, "Failed modifying moderation for cq:%d\n",
                                       ring);
                }
                priv->last_moder_packets[ring] = rx_packets;
                priv->last_moder_bytes[ring] = rx_bytes;
        }

        priv->last_moder_jiffies = jiffies;
}
static void mlx4_en_do_get_stats(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 stats_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
                err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
                if (err)
                        en_dbg(HW, priv, "Could not update stats\n");

                mlx4_en_auto_moderation(priv);

                queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        }
        if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
                mlx4_en_do_set_mac(priv);
                mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
        }
        mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 linkstate_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        int linkstate = priv->link_state;

        mutex_lock(&mdev->state_lock);
        /* If observable port state changed set carrier state and
         * report to system log */
        if (priv->last_link_state != linkstate) {
                if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
                        en_info(priv, "Link Down\n");
                        netif_carrier_off(priv->dev);
                } else {
                        en_info(priv, "Link Up\n");
                        netif_carrier_on(priv->dev);
                }
        }
        priv->last_link_state = linkstate;
        mutex_unlock(&mdev->state_lock);
}
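
/*
 * Port bring-up order: activate Rx rings and CQs, acquire the eth qp
 * and RSS steering, create the drop qp, then activate Tx CQs/rings,
 * configure the port (SET_PORT, INIT_PORT) and finally attach the
 * broadcast address and kick the rx_mode task.  The error labels at the
 * bottom unwind these stages in reverse.
 */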
int mlx4_en_start_port(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_ring *tx_ring;
        int rx_index = 0;
        int tx_index = 0;
        int err = 0;
        int i;
        int j;
        u8 mc_list[16] = {0};

        if (priv->port_up) {
                en_dbg(DRV, priv, "start port called while port already up\n");
                return 0;
        }

        INIT_LIST_HEAD(&priv->mc_list);
        INIT_LIST_HEAD(&priv->curr_list);
        INIT_LIST_HEAD(&priv->ethtool_list);
        memset(&priv->ethtool_rules[0], 0,
               sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

        /* Calculate Rx buf size */
        dev->mtu = min(dev->mtu, priv->max_mtu);
        mlx4_en_calc_rx_buf(dev);
        en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

        /* Configure rx cq's and rings */
        err = mlx4_en_activate_rx_rings(priv);
        if (err) {
                en_err(priv, "Failed to activate RX rings\n");
                return err;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];

                err = mlx4_en_activate_cq(priv, cq, i);
                if (err) {
                        en_err(priv, "Failed activating Rx CQ\n");
                        goto cq_err;
                }
                for (j = 0; j < cq->size; j++)
                        cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto cq_err;
                }
                mlx4_en_arm_cq(priv, cq);
                priv->rx_ring[i].cqn = cq->mcq.cqn;
                ++rx_index;
        }

        /* Set qp number */
        en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
        err = mlx4_en_get_qp(priv);
        if (err) {
                en_err(priv, "Failed getting eth qp\n");
                goto cq_err;
        }
        mdev->mac_removed[priv->port] = 0;

        err = mlx4_en_config_rss_steer(priv);
        if (err) {
                en_err(priv, "Failed configuring rss steering\n");
                goto mac_err;
        }

        err = mlx4_en_create_drop_qp(priv);
        if (err)
                goto rss_err;

        /* Configure tx cq's and rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                /* Configure cq */
                cq = &priv->tx_cq[i];
                err = mlx4_en_activate_cq(priv, cq, i);
                if (err) {
                        en_err(priv, "Failed allocating Tx CQ\n");
                        goto tx_err;
                }
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
                        en_err(priv, "Failed setting cq moderation parameters");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
                cq->buf->wqe_index = cpu_to_be16(0xffff);

                /* Configure ring */
                tx_ring = &priv->tx_ring[i];
                err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
                                               i / priv->num_tx_rings_p_up);
                if (err) {
                        en_err(priv, "Failed allocating Tx ring\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
                tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

                /* Arm CQ for TX completions */
                mlx4_en_arm_cq(priv, cq);

                /* Set initial ownership of all Tx TXBBs to SW (1) */
                for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
                        *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
                ++tx_index;
        }

        /* Configure port */
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
                                    priv->prof->tx_pause,
                                    priv->prof->tx_ppp,
                                    priv->prof->rx_pause,
                                    priv->prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
                       priv->port, err);
                goto tx_err;
        }
        /* Set default qp number */
        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
        if (err) {
                en_err(priv, "Failed setting default qp numbers\n");
                goto tx_err;
        }

        /* Init port */
        en_dbg(HW, priv, "Initializing port\n");
        err = mlx4_INIT_PORT(mdev->dev, priv->port);
        if (err) {
                en_err(priv, "Failed Initializing port\n");
                goto tx_err;
        }

        /* Attach rx QP to broadcast address */
        memset(&mc_list[10], 0xff, ETH_ALEN);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                                  priv->port, 0, MLX4_PROT_ETH,
                                  &priv->broadcast_id))
                mlx4_warn(mdev, "Failed Attaching Broadcast\n");

        /* Must redo promiscuous mode setup. */
        priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

        /* Schedule multicast task to populate multicast list */
        queue_work(mdev->workqueue, &priv->rx_mode_task);

        mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

        priv->port_up = true;
        netif_tx_start_all_queues(dev);
        netif_device_attach(dev);

        return 0;

tx_err:
        while (tx_index--) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
                mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
        }
        mlx4_en_destroy_drop_qp(priv);
rss_err:
        mlx4_en_release_rss_steer(priv);
mac_err:
        mlx4_en_put_qp(priv);
cq_err:
        while (rx_index--)
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
        for (i = 0; i < priv->rx_ring_num; i++)
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

        return err; /* need to close devices */
}
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_mc_list *mclist, *tmp;
        struct ethtool_flow_id *flow, *tmp_flow;
        int i;
        u8 mc_list[16] = {0};

        if (!priv->port_up) {
                en_dbg(DRV, priv, "stop port called while port already down\n");
                return;
        }

        /* Synchronize with tx routine */
        netif_tx_lock_bh(dev);
        if (detach)
                netif_device_detach(dev);
        netif_tx_stop_all_queues(dev);
        netif_tx_unlock_bh(dev);

        netif_tx_disable(dev);

        /* Set port as not active */
        priv->port_up = false;

        /* Promiscuous mode */
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
                                 MLX4_EN_FLAG_MC_PROMISC);
                mlx4_flow_steer_promisc_remove(mdev->dev,
                                               priv->port,
                                               MLX4_FS_PROMISC_UPLINK);
                mlx4_flow_steer_promisc_remove(mdev->dev,
                                               priv->port,
                                               MLX4_FS_PROMISC_ALL_MULTI);
        } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
                priv->flags &= ~MLX4_EN_FLAG_PROMISC;

                /* Disable promiscuous mode */
                mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
                                            priv->port);

                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
                                                      priv->port);
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }
        }

        /* Detach All multicasts */
        memset(&mc_list[10], 0xff, ETH_ALEN);
        mc_list[5] = priv->port; /* needed for B0 steering support */
        mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
                              MLX4_PROT_ETH, priv->broadcast_id);
        list_for_each_entry(mclist, &priv->curr_list, list) {
                memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
                mc_list[5] = priv->port;
                mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
                                      mc_list, MLX4_PROT_ETH, mclist->reg_id);
        }
        mlx4_en_clear_list(dev);
        list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
                list_del(&mclist->list);
                kfree(mclist);
        }

        /* Flush multicast filter */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

        mlx4_en_destroy_drop_qp(priv);

        /* Free TX Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
                mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
        }
        msleep(10);

        for (i = 0; i < priv->tx_ring_num; i++)
                mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

        /* Free RSS qps */
        mlx4_en_release_rss_steer(priv);

        /* Unregister Mac address for the port */
        mlx4_en_put_qp(priv);
        if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
                mdev->mac_removed[priv->port] = 1;

        /* Remove flow steering rules for the port*/
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                ASSERT_RTNL();
                list_for_each_entry_safe(flow, tmp_flow,
                                         &priv->ethtool_list, list) {
                        mlx4_flow_detach(mdev->dev, flow->id);
                        list_del(&flow->list);
                }
        }

        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
                while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
                        msleep(1);
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
        }

        /* close port*/
        mlx4_CLOSE_PORT(mdev->dev, priv->port);
}
static void mlx4_en_restart(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 watchdog_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                mlx4_en_stop_port(dev, 1);
                if (mlx4_en_start_port(dev))
                        en_err(priv, "Failed restarting port %d\n", priv->port);
        }
        mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;

        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                en_dbg(HW, priv, "Failed dumping statistics\n");

        memset(&priv->stats, 0, sizeof(priv->stats));
        memset(&priv->pstats, 0, sizeof(priv->pstats));
        memset(&priv->pkstats, 0, sizeof(priv->pkstats));
        memset(&priv->port_stats, 0, sizeof(priv->port_stats));

        for (i = 0; i < priv->tx_ring_num; i++) {
                priv->tx_ring[i].bytes = 0;
                priv->tx_ring[i].packets = 0;
                priv->tx_ring[i].tx_csum = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i].bytes = 0;
                priv->rx_ring[i].packets = 0;
                priv->rx_ring[i].csum_ok = 0;
                priv->rx_ring[i].csum_none = 0;
        }
}
static int mlx4_en_open(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        mutex_lock(&mdev->state_lock);

        if (!mdev->device_up) {
                en_err(priv, "Cannot open - device down/disabled\n");
                err = -EBUSY;
                goto out;
        }

        /* Reset HW statistics and SW counters */
        mlx4_en_clear_stats(dev);

        err = mlx4_en_start_port(dev);
        if (err)
                en_err(priv, "Failed starting port:%d\n", priv->port);

out:
        mutex_unlock(&mdev->state_lock);
        return err;
}
static int mlx4_en_close(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(IFDOWN, priv, "Close port called\n");

        mutex_lock(&mdev->state_lock);

        mlx4_en_stop_port(dev, 0);
        netif_carrier_off(dev);

        mutex_unlock(&mdev->state_lock);
        return 0;
}
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
        int i;

#ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
        priv->dev->rx_cpu_rmap = NULL;
#endif

        for (i = 0; i < priv->tx_ring_num; i++) {
                if (priv->tx_ring[i].tx_info)
                        mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
                if (priv->tx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
        }

        for (i = 0; i < priv->rx_ring_num; i++) {
                if (priv->rx_ring[i].rx_info)
                        mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
                                priv->prof->rx_ring_size, priv->stride);
                if (priv->rx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }

        if (priv->base_tx_qpn) {
                mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
                priv->base_tx_qpn = 0;
        }
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
        int err;

        err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
        if (err) {
                en_err(priv, "failed reserving range for TX rings\n");
                return err;
        }

        /* Create tx Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
                                      prof->tx_ring_size, i, TX))
                        goto err;

                if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
                                           prof->tx_ring_size, TXBB_SIZE))
                        goto err;
        }

        /* Create rx Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
                                      prof->rx_ring_size, i, RX))
                        goto err;

                if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
                                           prof->rx_ring_size, priv->stride))
                        goto err;
        }

#ifdef CONFIG_RFS_ACCEL
        if (priv->mdev->dev->caps.comp_pool) {
                priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
                if (!priv->dev->rx_cpu_rmap)
                        goto err;
        }
#endif

        return 0;

err:
        en_err(priv, "Failed to allocate NIC resources\n");
        return -ENOMEM;
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

        /* Unregister device - this will close the port if it was up */
        if (priv->registered)
                unregister_netdev(dev);

        if (priv->allocated)
                mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

        cancel_delayed_work(&priv->stats_task);
        /* flush any pending task for this netdev */
        flush_workqueue(mdev->workqueue);

        /* Detach the netdev so tasks would not attempt to access it */
        mutex_lock(&mdev->state_lock);
        mdev->pndev[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);

        mlx4_en_free_resources(priv);

        kfree(priv->tx_ring);
        kfree(priv->tx_cq);

        free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;

        en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
               dev->mtu, new_mtu);

        if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
                en_err(priv, "Bad MTU size:%d.\n", new_mtu);
                return -EPERM;
        }
        dev->mtu = new_mtu;

        if (netif_running(dev)) {
                mutex_lock(&mdev->state_lock);
                if (!mdev->device_up) {
                        /* NIC is probably restarting - let watchdog task reset
                         * the port */
                        en_dbg(DRV, priv, "Change MTU called with card down!?\n");
                } else {
                        mlx4_en_stop_port(dev, 1);
                        err = mlx4_en_start_port(dev);
                        if (err) {
                                en_err(priv, "Failed restarting port:%d\n",
                                       priv->port);
                                queue_work(mdev->workqueue, &priv->watchdog_task);
                        }
                }
                mutex_unlock(&mdev->state_lock);
        }
        return 0;
}
static int mlx4_en_set_features(struct net_device *netdev,
                                netdev_features_t features)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);

        if (features & NETIF_F_LOOPBACK)
                priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
        else
                priv->ctrl_flags &=
                        cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

        mlx4_en_update_loopback_state(netdev, features);

        return 0;
}
static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
                           const unsigned char *addr, u16 flags)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_dev *mdev = priv->mdev->dev;
        int err;

        if (!mlx4_is_mfunc(mdev))
                return -EOPNOTSUPP;

        /* Hardware does not support aging addresses, allow only
         * permanent addresses if ndm_state is given
         */
        if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
                en_info(priv, "Add FDB only supports static addresses\n");
                return -EINVAL;
        }

        if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
                err = dev_uc_add_excl(dev, addr);
        else if (is_multicast_ether_addr(addr))
                err = dev_mc_add_excl(dev, addr);
        else
                err = -EINVAL;

        /* Only return duplicate errors if NLM_F_EXCL is set */
        if (err == -EEXIST && !(flags & NLM_F_EXCL))
                err = 0;

        return err;
}
static int mlx4_en_fdb_del(struct ndmsg *ndm,
                           struct nlattr *tb[],
                           struct net_device *dev,
                           const unsigned char *addr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_dev *mdev = priv->mdev->dev;
        int err;

        if (!mlx4_is_mfunc(mdev))
                return -EOPNOTSUPP;

        if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
                en_info(priv, "Del FDB only supports static addresses\n");
                return -EINVAL;
        }

        if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
                err = dev_uc_del(dev, addr);
        else if (is_multicast_ether_addr(addr))
                err = dev_mc_del(dev, addr);
        else
                err = -EINVAL;

        return err;
}
static int mlx4_en_fdb_dump(struct sk_buff *skb,
                            struct netlink_callback *cb,
                            struct net_device *dev, int idx)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_dev *mdev = priv->mdev->dev;

        if (mlx4_is_mfunc(mdev))
                idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);

        return idx;
}
static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
        .ndo_get_stats          = mlx4_en_get_stats,
        .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = mlx4_en_change_mtu,
        .ndo_tx_timeout         = mlx4_en_tx_timeout,
        .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mlx4_en_netpoll,
#endif
        .ndo_set_features       = mlx4_en_set_features,
        .ndo_setup_tc           = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
#endif
        .ndo_fdb_add            = mlx4_en_fdb_add,
        .ndo_fdb_del            = mlx4_en_fdb_del,
        .ndo_fdb_dump           = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
{
        struct net_device *dev;
        struct mlx4_en_priv *priv;
        int i;
        int err;

        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
        if (dev == NULL)
                return -ENOMEM;

        netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
        netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

        SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
        dev->dev_id = port - 1;

        /*
         * Initialize driver private data
         */

        priv = netdev_priv(dev);
        memset(priv, 0, sizeof(struct mlx4_en_priv));
        priv->dev = dev;
        priv->mdev = mdev;
        priv->ddev = &mdev->pdev->dev;
        priv->prof = prof;
        priv->port = port;
        priv->port_up = false;
        priv->flags = prof->flags;
        priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
                        MLX4_WQE_CTRL_SOLICITED);
        priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
        priv->tx_ring_num = prof->tx_ring_num;

        priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
                                GFP_KERNEL);
        if (!priv->tx_ring) {
                err = -ENOMEM;
                goto out;
        }
        priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
                              GFP_KERNEL);
        if (!priv->tx_cq) {
                err = -ENOMEM;
                goto out;
        }
        priv->rx_ring_num = prof->rx_ring_num;
        priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
        priv->mac_index = -1;
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
        INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev))
                dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
                INIT_HLIST_HEAD(&priv->mac_hash[i]);

        /* Query for default mac and max mtu */
        priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

        /* Set default MAC */
        dev->addr_len = ETH_ALEN;
        mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
        if (!is_valid_ether_addr(dev->dev_addr)) {
                en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
                       priv->port, dev->dev_addr);
                err = -EINVAL;
                goto out;
        }

        memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

        priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                          DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
        err = mlx4_en_alloc_resources(priv);
        if (err)
                goto out;

#ifdef CONFIG_RFS_ACCEL
        INIT_LIST_HEAD(&priv->filters);
        spin_lock_init(&priv->filters_lock);
#endif

        /* Allocate page for receive rings */
        err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
                                 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
        if (err) {
                en_err(priv, "Failed to allocate page for rx qps\n");
                goto out;
        }
        priv->allocated = 1;

        /*
         * Initialize netdev entry points
         */
        dev->netdev_ops = &mlx4_netdev_ops;
        dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

        SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

        /*
         * Set driver features
         */
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        if (mdev->LSO_support)
                dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

        dev->vlan_features = dev->hw_features;

        dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
        dev->features = dev->hw_features | NETIF_F_HIGHDMA |
                        NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                        NETIF_F_HW_VLAN_FILTER;
        dev->hw_features |= NETIF_F_LOOPBACK;

        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                dev->hw_features |= NETIF_F_NTUPLE;

        if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
                dev->priv_flags |= IFF_UNICAST_FLT;

        mdev->pndev[port] = dev;

        netif_carrier_off(dev);
        err = register_netdev(dev);
        if (err) {
                en_err(priv, "Netdev registration failed for port %d\n", port);
                goto out;
        }
        priv->registered = 1;

        en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

        /* Configure port */
        mlx4_en_calc_rx_buf(dev);
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
                                    prof->tx_pause, prof->tx_ppp,
                                    prof->rx_pause, prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
                       priv->port, err);
                goto out;
        }

        /* Init port */
        en_warn(priv, "Initializing port\n");
        err = mlx4_INIT_PORT(mdev->dev, priv->port);
        if (err) {
                en_err(priv, "Failed Initializing port\n");
                goto out;
        }
        mlx4_en_set_default_moderation(priv);
        queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        return 0;

out:
        mlx4_en_destroy_netdev(dev);
        return err;
}