]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
Merge tag 'mlx5-updates-2017-06-27' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / netronome / nfp / nfp_net_repr.c
1 /*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include <linux/etherdevice.h>
35 #include <linux/io-64-nonatomic-hi-lo.h>
36 #include <linux/lockdep.h>
37 #include <net/dst_metadata.h>
38
39 #include "nfpcore/nfp_cpp.h"
40 #include "nfpcore/nfp_nsp.h"
41 #include "nfp_app.h"
42 #include "nfp_main.h"
43 #include "nfp_net_ctrl.h"
44 #include "nfp_net_repr.h"
45 #include "nfp_port.h"
46
47 static void
48 nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
49 int tx_status)
50 {
51 struct nfp_repr *repr = netdev_priv(netdev);
52 struct nfp_repr_pcpu_stats *stats;
53
54 if (unlikely(tx_status != NET_XMIT_SUCCESS &&
55 tx_status != NET_XMIT_CN)) {
56 this_cpu_inc(repr->stats->tx_drops);
57 return;
58 }
59
60 stats = this_cpu_ptr(repr->stats);
61 u64_stats_update_begin(&stats->syncp);
62 stats->tx_packets++;
63 stats->tx_bytes += len;
64 u64_stats_update_end(&stats->syncp);
65 }
66
67 void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
68 {
69 struct nfp_repr *repr = netdev_priv(netdev);
70 struct nfp_repr_pcpu_stats *stats;
71
72 stats = this_cpu_ptr(repr->stats);
73 u64_stats_update_begin(&stats->syncp);
74 stats->rx_packets++;
75 stats->rx_bytes += len;
76 u64_stats_update_end(&stats->syncp);
77 }
78
79 static void
80 nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port,
81 struct rtnl_link_stats64 *stats)
82 {
83 u8 __iomem *mem;
84
85 mem = app->pf->mac_stats_mem + phy_port * NFP_MAC_STATS_SIZE;
86
87 /* TX and RX stats are flipped as we are returning the stats as seen
88 * at the switch port corresponding to the phys port.
89 */
90 stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
91 stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
92 stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
93
94 stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
95 stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
96 stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
97 }
98
99 static void
100 nfp_repr_vf_get_stats64(const struct nfp_app *app, u8 vf,
101 struct rtnl_link_stats64 *stats)
102 {
103 u8 __iomem *mem;
104
105 mem = app->pf->vf_cfg_mem + vf * NFP_NET_CFG_BAR_SZ;
106
107 /* TX and RX stats are flipped as we are returning the stats as seen
108 * at the switch port corresponding to the VF.
109 */
110 stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
111 stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
112 stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
113
114 stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
115 stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
116 stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
117 }
118
119 static void
120 nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf,
121 struct rtnl_link_stats64 *stats)
122 {
123 u8 __iomem *mem;
124
125 if (pf)
126 return;
127
128 mem = nfp_cpp_area_iomem(app->pf->data_vnic_bar);
129
130 stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
131 stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
132 stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
133
134 stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
135 stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
136 stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
137 }
138
139 static void
140 nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
141 {
142 struct nfp_repr *repr = netdev_priv(netdev);
143 struct nfp_eth_table_port *eth_port;
144 struct nfp_app *app = repr->app;
145
146 if (WARN_ON(!repr->port))
147 return;
148
149 switch (repr->port->type) {
150 case NFP_PORT_PHYS_PORT:
151 eth_port = __nfp_port_get_eth_port(repr->port);
152 if (!eth_port)
153 break;
154 nfp_repr_phy_port_get_stats64(app, eth_port->index, stats);
155 break;
156 case NFP_PORT_PF_PORT:
157 nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats);
158 break;
159 case NFP_PORT_VF_PORT:
160 nfp_repr_vf_get_stats64(app, repr->port->vf_id, stats);
161 default:
162 break;
163 }
164 }
165
166 static bool
167 nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
168 {
169 switch (attr_id) {
170 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
171 return true;
172 }
173
174 return false;
175 }
176
/* Sum the per-cpu software counters of @netdev's representor into
 * @stats.  These count traffic which traversed the host (CPU) path,
 * reported via IFLA_OFFLOAD_XSTATS_CPU_HIT.  Always returns 0.
 */
static int
nfp_repr_get_host_stats64(const struct net_device *netdev,
			  struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct nfp_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		/* u64_stats retry loop: re-read this CPU's counters if a
		 * writer updated them while we were reading.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));

		/* Accumulate outside the retry loop so a retry does not
		 * double-count.
		 */
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}

	return 0;
}
208
209 static int
210 nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
211 void *stats)
212 {
213 switch (attr_id) {
214 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
215 return nfp_repr_get_host_stats64(dev, stats);
216 }
217
218 return -EINVAL;
219 }
220
/* ndo_start_xmit for representors: redirect the skb to the lower (PF)
 * netdev with the metadata dst attached, so the lower device can steer
 * the frame to the right switch port (dst carries the firmware port id
 * set up in nfp_repr_init()).
 */
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	/* Save the length now - the skb may be consumed/freed by
	 * dev_queue_xmit() below.
	 */
	unsigned int len = skb->len;
	int ret;

	/* Replace any existing dst with a held reference to our
	 * pre-allocated metadata dst.
	 */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	/* Count success/congestion as TX, everything else as a drop. */
	nfp_repr_inc_tx_stats(netdev, len, ret);

	return ret;
}
237
238 static int nfp_repr_stop(struct net_device *netdev)
239 {
240 struct nfp_repr *repr = netdev_priv(netdev);
241
242 return nfp_app_repr_stop(repr->app, repr);
243 }
244
245 static int nfp_repr_open(struct net_device *netdev)
246 {
247 struct nfp_repr *repr = netdev_priv(netdev);
248
249 return nfp_app_repr_open(repr->app, repr);
250 }
251
/* Netdev ops shared by all representor types.  Link stats come from HW
 * counters (switch-side view); offload xstats come from the per-cpu
 * software counters for host-path traffic.
 */
const struct net_device_ops nfp_repr_netdev_ops = {
	.ndo_open		= nfp_repr_open,
	.ndo_stop		= nfp_repr_stop,
	.ndo_start_xmit		= nfp_repr_xmit,
	.ndo_get_stats64	= nfp_repr_get_stats64,
	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
	.ndo_get_phys_port_name	= nfp_port_get_phys_port_name,
};
261
/* Undo nfp_repr_init(): unregister the netdev first so no new traffic
 * can be queued, then drop our metadata dst reference and free the
 * associated nfp_port.
 */
static void nfp_repr_clean(struct nfp_repr *repr)
{
	unregister_netdev(repr->netdev);
	dst_release((struct dst_entry *)repr->dst);
	nfp_port_free(repr->port);
}
268
/* Representor TX is re-queued onto the lower PF netdev (see
 * nfp_repr_xmit()), so give the representors' netdev locks their own
 * lockdep classes to avoid false-positive reports when they nest with
 * the lower device's locks - standard pattern for stacked devices.
 */
static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
static struct lock_class_key nfp_repr_netdev_addr_lock_key;

/* netdev_for_each_tx_queue() callback: reclass one TX queue lock. */
static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
					   struct netdev_queue *txq,
					   void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
}
278
/* Put @dev's addr_list lock and all of its TX queue locks into
 * representor-specific lockdep classes.
 */
static void nfp_repr_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
}
284
/* nfp_repr_init() - finish initializing and register a representor
 * @app:	backing NFP app structure
 * @netdev:	netdev previously allocated with nfp_repr_alloc()
 * @cmsg_port_id: firmware port id, stored in the metadata dst so
 *		nfp_repr_xmit() can tag redirected frames
 * @port:	nfp_port to associate; on success ownership passes to the
 *		repr (freed by nfp_repr_clean()), on error the caller
 *		keeps it
 * @pf_netdev:	lower device all representor TX is redirected to
 *
 * Return: 0 on success, -ENOMEM or a register_netdev() error code.
 */
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
		  u32 cmsg_port_id, struct nfp_port *port,
		  struct net_device *pf_netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	nfp_repr_set_lockdep_class(netdev);

	repr->port = port;
	/* Metadata dst is shared by all skbs sent through this repr;
	 * nfp_repr_xmit() takes a reference per skb.
	 */
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;
	repr->dst->u.port_info.port_id = cmsg_port_id;
	repr->dst->u.port_info.lower_dev = pf_netdev;

	netdev->netdev_ops = &nfp_repr_netdev_ops;

	err = register_netdev(netdev);
	if (err)
		goto err_clean;

	return 0;

err_clean:
	/* Only the dst is released here; @port stays with the caller. */
	dst_release((struct dst_entry *)repr->dst);
	return err;
}
313
/* Release everything nfp_repr_alloc() allocated: the per-cpu stats
 * and the netdev itself (which embeds the nfp_repr private area).
 */
static void nfp_repr_free(struct nfp_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
}
319
320 struct net_device *nfp_repr_alloc(struct nfp_app *app)
321 {
322 struct net_device *netdev;
323 struct nfp_repr *repr;
324
325 netdev = alloc_etherdev(sizeof(*repr));
326 if (!netdev)
327 return NULL;
328
329 repr = netdev_priv(netdev);
330 repr->netdev = netdev;
331 repr->app = app;
332
333 repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
334 if (!repr->stats)
335 goto err_free_netdev;
336
337 return netdev;
338
339 err_free_netdev:
340 free_netdev(netdev);
341 return NULL;
342 }
343
/* Fully tear down one representor: log, unregister/clean, then free. */
static void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
	nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
		 repr->netdev->name);
	nfp_repr_clean(repr);
	nfp_repr_free(repr);
}
351
352 void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
353 {
354 unsigned int i;
355
356 for (i = 0; i < reprs->num_reprs; i++)
357 if (reprs->reprs[i])
358 nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
359
360 kfree(reprs);
361 }
362
/* Detach and destroy all representors of the given @type.
 *
 * The table is first unpublished by swapping in NULL via
 * nfp_app_reprs_set(); synchronize_rcu() then waits for any readers
 * still holding the old pointer before the netdevs are torn down.
 */
void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
				 enum nfp_repr_type type)
{
	struct nfp_reprs *reprs;

	reprs = nfp_app_reprs_set(app, type, NULL);
	if (!reprs)
		return;

	/* Wait for concurrent RCU readers of the old table. */
	synchronize_rcu();
	nfp_reprs_clean_and_free(reprs);
}
376
377 struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
378 {
379 struct nfp_reprs *reprs;
380
381 reprs = kzalloc(sizeof(*reprs) +
382 num_reprs * sizeof(struct net_device *), GFP_KERNEL);
383 if (!reprs)
384 return NULL;
385 reprs->num_reprs = num_reprs;
386
387 return reprs;
388 }