/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "LICENSE.GPL".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/tcp.h>

#include "igb.h"
#include "igb_vmdq.h"
#include <linux/if_vlan.h>

#ifdef CONFIG_IGB_VMDQ_NETDEV
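/**
 * igb_vmdq_open - bring up a VMDq virtual interface
 * @dev: VMDq network interface device structure
 *
 * Requires the parent PF netdev to be up; attaches this netdev to its
 * queue pair, installs a MAC filter for the assigned hardware queue and
 * signals carrier on.
 **/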
int igb_vmdq_open(struct net_device *dev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	struct net_device *main_netdev = adapter->netdev;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		DPRINTK(DRV, WARNING,
			"Open %s before opening this device.\n",
			main_netdev->name);
		return -EAGAIN;
	}
	netif_carrier_off(dev);
	vadapter->tx_ring->vmdq_netdev = dev;
	vadapter->rx_ring->vmdq_netdev = dev;
	if (is_valid_ether_addr(dev->dev_addr)) {
		igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
		igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
	}
	netif_carrier_on(dev);
	return 0;
}

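/**
 * igb_vmdq_close - take down a VMDq virtual interface
 * @dev: VMDq network interface device structure
 *
 * Drops carrier, removes the MAC filter for the assigned hardware queue and
 * detaches this netdev from its queue pair.
 **/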
int igb_vmdq_close(struct net_device *dev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	netif_carrier_off(dev);
	igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);

	vadapter->tx_ring->vmdq_netdev = NULL;
	vadapter->rx_ring->vmdq_netdev = NULL;
	return 0;
}

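/**
 * igb_vmdq_xmit_frame - transmit a frame on a VMDq virtual interface
 * @skb: socket buffer to transmit
 * @dev: VMDq network interface device structure
 *
 * Hands the frame to the transmit ring dedicated to this virtual interface.
 **/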
netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);

	return igb_xmit_frame_ring(skb, vadapter->tx_ring);
}

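/**
 * igb_vmdq_get_stats - report interface statistics
 * @dev: VMDq network interface device structure
 *
 * Adds the per-queue hardware packet, byte and multicast counters to the
 * software statistics, clears the hardware counters and returns the totals.
 **/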
struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	struct e1000_hw *hw = &adapter->hw;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	vadapter->net_stats.rx_packets +=
			E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
	E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
	vadapter->net_stats.tx_packets +=
			E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
	E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
	vadapter->net_stats.rx_bytes +=
			E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
	E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
	vadapter->net_stats.tx_bytes +=
			E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
	E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
	vadapter->net_stats.multicast +=
			E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
	E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
	/* only return the current stats */
	return &vadapter->net_stats;
}

/**
 * igb_write_vm_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_vm_addr_list(struct net_device *netdev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	int count = 0;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > igb_available_rars(adapter))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
#ifdef NETDEV_HW_ADDR_T_UNICAST
		struct netdev_hw_addr *ha;
#else
		struct dev_mc_list *ha;
#endif
		netdev_for_each_uc_addr(ha, netdev) {
#ifdef NETDEV_HW_ADDR_T_UNICAST
			igb_del_mac_filter(adapter, ha->addr, hw_queue);
			igb_add_mac_filter(adapter, ha->addr, hw_queue);
#else
			igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
			igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
#endif
			count++;
		}
	}
	return count;
}


#define E1000_VMOLR_UPE	0x20000000 /* Unicast promiscuous mode */
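/**
 * igb_vmdq_set_rx_mode - update the receive filtering mode
 * @dev: VMDq network interface device structure
 *
 * Programs the per-queue VMOLR (and global RCTL) registers according to the
 * interface flags and rewrites the multicast and unicast address filters,
 * enabling the corresponding promiscuous bits when the filter tables cannot
 * hold all addresses.
 **/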
void igb_vmdq_set_rx_mode(struct net_device *dev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr, rctl;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	/* Check for Promiscuous and All Multicast modes */
	vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));

	/* clear the affected bits */
	vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
		   E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);

	if (dev->flags & IFF_PROMISC) {
		vmolr |= E1000_VMOLR_UPE;
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		rctl |= E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	} else {
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		if (dev->flags & IFF_ALLMULTI) {
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			if (igb_write_mc_addr_list(adapter->netdev) != 0)
				vmolr |= E1000_VMOLR_ROMPE;
		}
#ifdef HAVE_SET_RX_MODE
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		if (igb_write_vm_addr_list(dev) < 0)
			vmolr |= E1000_VMOLR_UPE;
#endif
	}
	E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);

	return;
}

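/**
 * igb_vmdq_set_mac - change the MAC address of a VMDq virtual interface
 * @dev: VMDq network interface device structure
 * @p: pointer to a struct sockaddr holding the new address
 *
 * Replaces the MAC filter for the assigned hardware queue with the new
 * address and updates the netdev address.
 **/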
int igb_vmdq_set_mac(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
}

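/**
 * igb_vmdq_change_mtu - change the MTU of a VMDq virtual interface
 * @dev: VMDq network interface device structure
 * @new_mtu: requested MTU
 *
 * The MTU of a virtual interface may not exceed the MTU of the parent PF
 * netdev; larger values are rejected with -EINVAL.
 **/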
int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;

	if (adapter->netdev->mtu < new_mtu) {
		DPRINTK(PROBE, INFO,
			"Set MTU on %s to >= %d before changing MTU on %s\n",
			adapter->netdev->name, new_mtu, dev->name);
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}

void igb_vmdq_tx_timeout(struct net_device *dev)
{
	return;
}

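/**
 * igb_vmdq_vlan_rx_register - register a VLAN group with the interface
 * @dev: VMDq network interface device structure
 * @grp: VLAN group from the 8021q layer
 *
 * Stores the VLAN group, enables hardware VLAN tag handling on the adapter
 * and clears the per-queue VLAN insertion register (VMVIR).
 **/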
void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	struct e1000_hw *hw = &adapter->hw;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	vadapter->vlgrp = grp;

	igb_enable_vlan_tags(adapter);
	E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);

	return;
}

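/**
 * igb_vmdq_vlan_rx_add_vid - add a VLAN ID filter
 * @dev: VMDq network interface device structure
 * @vid: VLAN ID to add
 *
 * Adds the VLAN ID to the VLVF filter table for this hardware queue and, on
 * kernels without netdev VLAN features, propagates feature flags to the
 * VLAN device.
 **/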
void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
#ifndef HAVE_NETDEV_VLAN_FEATURES
	struct net_device *v_netdev;
#endif
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, TRUE, hw_queue);

#ifndef HAVE_NETDEV_VLAN_FEATURES

	/* Copy feature flags from netdev to the vlan netdev for this vid.
	 * This allows things like TSO to bubble down to our vlan device.
	 */
	v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
	v_netdev->features |= adapter->netdev->features;
	vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
#endif

	return;
}

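/**
 * igb_vmdq_vlan_rx_kill_vid - remove a VLAN ID filter
 * @dev: VMDq network interface device structure
 * @vid: VLAN ID to remove
 *
 * Clears the VLAN device from the group and removes the VLAN ID from the
 * VLVF filter table for this hardware queue.
 **/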
void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	int hw_queue = vadapter->rx_ring->queue_index +
		       adapter->vfs_allocated_count;

	vlan_group_set_device(vadapter->vlgrp, vid, NULL);
	/* remove vlan from VLVF table array */
	igb_vlvf_set(adapter, vid, FALSE, hw_queue);

	return;
}

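/**
 * igb_vmdq_get_settings - report link settings via ethtool
 * @netdev: VMDq network interface device structure
 * @ecmd: ethtool command structure to fill
 *
 * Reports the supported and advertised link modes and the current speed and
 * duplex of the parent adapter's link.
 **/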
static int igb_vmdq_get_settings(struct net_device *netdev,
				 struct ethtool_cmd *ecmd)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 status;

	if (hw->phy.media_type == e1000_media_type_copper) {

		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full |
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP);
		ecmd->advertising = ADVERTISED_TP;

		if (hw->mac.autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->phy.autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy.addr;
	} else {
		ecmd->supported = (SUPPORTED_1000baseT_Full |
				   SUPPORTED_FIBRE |
				   SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_1000baseT_Full |
				     ADVERTISED_FIBRE |
				     ADVERTISED_Autoneg);

		ecmd->port = PORT_FIBRE;
	}

	ecmd->transceiver = XCVR_INTERNAL;

	status = E1000_READ_REG(hw, E1000_STATUS);

	if (status & E1000_STATUS_LU) {

		if ((status & E1000_STATUS_SPEED_1000) ||
		    hw->phy.media_type != e1000_media_type_copper)
			ecmd->speed = SPEED_1000;
		else if (status & E1000_STATUS_SPEED_100)
			ecmd->speed = SPEED_100;
		else
			ecmd->speed = SPEED_10;

		if ((status & E1000_STATUS_FD) ||
		    hw->phy.media_type != e1000_media_type_copper)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	return 0;
}

static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	return adapter->msg_enable;
}

static void igb_vmdq_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
	struct igb_adapter *adapter = vadapter->real_adapter;
	struct net_device *main_netdev = adapter->netdev;

	strncpy(drvinfo->driver, igb_driver_name, 32);
	strncpy(drvinfo->version, igb_driver_version, 32);

	strncpy(drvinfo->fw_version, "N/A", 4);
	snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
		 vadapter->rx_ring->queue_index);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
}

static void igb_vmdq_get_ringparam(struct net_device *netdev,
				   struct ethtool_ringparam *ring)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);

	struct igb_ring *tx_ring = vadapter->tx_ring;
	struct igb_ring *rx_ring = vadapter->rx_ring;

	ring->rx_max_pending = IGB_MAX_RXD;
	ring->tx_max_pending = IGB_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
{
	struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
	struct igb_adapter *adapter = vadapter->real_adapter;

	return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
}

static struct ethtool_ops igb_vmdq_ethtool_ops = {
	.get_settings		= igb_vmdq_get_settings,
	.get_drvinfo		= igb_vmdq_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= igb_vmdq_get_ringparam,
	.get_rx_csum		= igb_vmdq_get_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_msglevel		= igb_vmdq_get_msglevel,
#ifdef NETIF_F_TSO
	.get_tso		= ethtool_op_get_tso,
#endif
#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
	.get_perm_addr		= ethtool_op_get_perm_addr,
#endif
};

void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
}


#endif /* CONFIG_IGB_VMDQ_NETDEV */