// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2010-2014 Intel Corporation.
 */

/*
 * This code is inspired from the book "Linux Device Drivers" by
 * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include <rte_kni_common.h>
#include <kni_fifo.h>

#include "compat.h"
#include "kni_dev.h"

#define WD_TIMEOUT 5 /* jiffies */

#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */

/* typedef for rx function */
typedef void (*kni_net_rx_t)(struct kni_dev *kni);

static void kni_net_rx_normal(struct kni_dev *kni);

/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;

/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
{
	return phys_to_virt((unsigned long)pa);
}

/* physical address to virtual address */
static void *
pa2va(void *pa, struct rte_kni_mbuf *m)
{
	void *va;

	va = (void *)((unsigned long)pa +
		(unsigned long)m->buf_addr -
		(unsigned long)m->buf_physaddr);
	return va;
}

/* mbuf data kernel virtual address from mbuf kernel virtual address */
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
	return phys_to_virt(m->buf_physaddr + m->data_off);
}

/* virtual address to physical address */
static void *
va2pa(void *va, struct rte_kni_mbuf *m)
{
	void *pa;

	pa = (void *)((unsigned long)va -
		((unsigned long)m->buf_addr -
		(unsigned long)m->buf_physaddr));
	return pa;
}

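/*
 * Note on the helpers above: an mbuf records both the buffer address as
 * seen by the userspace application (buf_addr) and its physical address
 * (buf_physaddr), so the two differ by a fixed per-mbuf offset, which is
 * what pa2va() and va2pa() add or subtract. pa2kva() and kva2data_kva()
 * instead rely on phys_to_virt(), i.e. on the mbuf memory being covered
 * by the kernel's linear mapping.
 */
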
/*
 * Send a request to the userspace application and wait for its response.
 */
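/*
 * The exchange is synchronous: the request is copied into the shared
 * buffer (sync_kva), a pointer to it is pushed onto req_q, and the
 * caller sleeps until kni_net_poll_resp() sees the response on resp_q
 * and wakes kni->wq. The response then overwrites *req.
 */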
static int
kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
{
	int ret = -1;
	void *resp_va;
	uint32_t num;
	int ret_val;

	if (!kni || !req) {
		pr_err("No kni instance or request\n");
		return -EINVAL;
	}

	mutex_lock(&kni->sync_lock);

	/* Construct data */
	memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
	num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
	if (num < 1) {
		pr_err("Cannot send to req_q\n");
		ret = -EBUSY;
		goto fail;
	}

	ret_val = wait_event_interruptible_timeout(kni->wq,
			kni_fifo_count(kni->resp_q), 3 * HZ);
	if (signal_pending(current) || ret_val <= 0) {
		ret = -ETIME;
		goto fail;
	}
	num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
	if (num != 1 || resp_va != kni->sync_va) {
		/* This should never happen */
		pr_err("No data in resp_q\n");
		ret = -ENODATA;
		goto fail;
	}

	memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
	ret = 0;

fail:
	mutex_unlock(&kni->sync_lock);
	return ret;
}

/*
 * Open and close
 */
static int
kni_net_open(struct net_device *dev)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	netif_start_queue(dev);
	if (dflt_carrier == 1)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

	/* Setting if_up to non-zero means up */
	req.if_up = 1;
	ret = kni_net_process_request(kni, &req);

	return (ret == 0) ? req.result : ret;
}

static int
kni_net_release(struct net_device *dev)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	netif_stop_queue(dev); /* can't transmit any more */
	netif_carrier_off(dev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

	/* Setting if_up to 0 means down */
	req.if_up = 0;
	ret = kni_net_process_request(kni, &req);

	return (ret == 0) ? req.result : ret;
}

static void
kni_fifo_trans_pa2va(struct kni_dev *kni,
	struct rte_kni_fifo *src_pa, struct rte_kni_fifo *dst_va)
{
	uint32_t ret, i, num_dst, num_rx;
	void *kva;

	do {
		num_dst = kni_fifo_free_count(dst_va);
		if (num_dst == 0)
			return;

		num_rx = min_t(uint32_t, num_dst, MBUF_BURST_SZ);

		num_rx = kni_fifo_get(src_pa, kni->pa, num_rx);
		if (num_rx == 0)
			return;

		for (i = 0; i < num_rx; i++) {
			kva = pa2kva(kni->pa[i]);
			kni->va[i] = pa2va(kni->pa[i], kva);
		}

		ret = kni_fifo_put(dst_va, kni->va, num_rx);
		if (ret != num_rx) {
			/* Failing should not happen */
			pr_err("Fail to enqueue entries into dst_va\n");
			return;
		}
	} while (1);
}

/* Try to release mbufs when the kni device is released */
void kni_net_release_fifo_phy(struct kni_dev *kni)
{
	/* release rx_q first, because it can't be released in userspace */
	kni_fifo_trans_pa2va(kni, kni->rx_q, kni->free_q);
	/* release alloc_q to speed up kni release in userspace */
	kni_fifo_trans_pa2va(kni, kni->alloc_q, kni->free_q);
}

/*
 * Configuration changes (passed on by ifconfig)
 */
static int
kni_net_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP) /* can't act on a running interface */
		return -EBUSY;

	/* ignore other fields */
	return 0;
}

/*
 * Transmit a packet (called by the kernel)
 */
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	uint32_t ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	void *pkt_pa = NULL;
	void *pkt_va = NULL;

	/* save the timestamp */
#ifdef HAVE_TRANS_START_HELPER
	netif_trans_update(dev);
#else
	dev->trans_start = jiffies;
#endif

	/* Drop the packet if it does not fit in an mbuf */
	if (skb->len > kni->mbuf_size)
		goto drop;

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If there is no free entry in tx_q or no entry in
		 * alloc_q, drop the skb and bail out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = pa2kva(pkt_pa);
		data_kva = kva2data_kva(pkt_kva);
		pkt_va = pa2va(pkt_pa, pkt_kva);

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		/* Pad short frames to the minimum Ethernet frame size */
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			pr_err("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		pr_err("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

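/*
 * Note: each transmit consumes one mbuf from alloc_q and produces one
 * entry on tx_q; the userspace side is expected to keep alloc_q
 * replenished with free mbufs, otherwise packets are dropped here
 * rather than blocking the kernel.
 */
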
/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num_rx, num_fq;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);
	if (num_fq == 0) {
		/* No room on the free_q, bail out */
		return;
	}

	/* Calculate the number of entries to dequeue from rx_q */
	num_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ);

	/* Burst dequeue from rx_q */
	num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);
	if (num_rx == 0)
		return;

	/* Transfer received packets to netif */
	for (i = 0; i < num_rx; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->pkt_len;
		data_kva = kva2data_kva(kva);
		kni->va[i] = pa2va(kni->pa[i], kva);

		skb = dev_alloc_skb(len + 2);
		if (!skb) {
			/* Update statistics */
			kni->stats.rx_dropped++;
			continue;
		}

		/*
		 * Align the IP header on a 16B boundary: the 2-byte
		 * reserve offsets the 14-byte Ethernet header.
		 */
		skb_reserve(skb, 2);

		if (kva->nb_segs == 1) {
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Call netif interface */
		netif_rx_ni(skb);

		/* Update statistics */
		kni->stats.rx_bytes += len;
		kni->stats.rx_packets++;
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num_rx);
	if (ret != num_rx)
		/* Failing should not happen */
		pr_err("Fail to enqueue entries into free_q\n");
}

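/*
 * Buffer flow in the normal path above: userspace produces filled mbufs
 * on rx_q; the kernel copies each packet into a freshly allocated skb,
 * hands it to the stack with netif_rx_ni(), and returns the mbufs
 * through free_q so userspace can recycle them.
 */
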
/*
 * RX: loopback with enqueue/dequeue fifos.
 */
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num, num_rq, num_tq, num_aq, num_fq;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct rte_kni_mbuf *alloc_kva;
	void *alloc_data_kva;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in tx_q */
	num_tq = kni_fifo_free_count(kni->tx_q);

	/* Get the number of entries in alloc_q */
	num_aq = kni_fifo_count(kni->alloc_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to be dequeued from rx_q */
	num = min(num_rq, num_tq);
	num = min(num, num_aq);
	num = min(num, num_fq);
	num = min_t(uint32_t, num, MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, kni->pa, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Dequeue entries from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, kni->alloc_pa, num);
	if (ret) {
		num = ret;
		/* Copy mbufs */
		for (i = 0; i < num; i++) {
			kva = pa2kva(kni->pa[i]);
			len = kva->pkt_len;
			data_kva = kva2data_kva(kva);
			kni->va[i] = pa2va(kni->pa[i], kva);

			alloc_kva = pa2kva(kni->alloc_pa[i]);
			alloc_data_kva = kva2data_kva(alloc_kva);
			kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);

			memcpy(alloc_data_kva, data_kva, len);
			alloc_kva->pkt_len = len;
			alloc_kva->data_len = len;

			kni->stats.tx_bytes += len;
			kni->stats.rx_bytes += len;
		}

		/* Burst enqueue mbufs into tx_q */
		ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num);
		if (ret != num)
			/* Failing should not happen */
			pr_err("Fail to enqueue mbufs into tx_q\n");
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num);
	if (ret != num)
		/* Failing should not happen */
		pr_err("Fail to enqueue mbufs into free_q\n");

	/**
	 * Update statistics; enqueue/dequeue failures cannot happen here,
	 * as all queue counts were checked above.
	 */
	kni->stats.tx_packets += num;
	kni->stats.rx_packets += num;
}

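/*
 * In the fifo loopback mode above, a packet is "received" by copying it
 * from an rx_q mbuf into a fresh alloc_q mbuf and immediately queueing
 * the copy on tx_q, which is why the rx and tx counters advance in
 * lockstep.
 */
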
/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num_rq, num_fq, num;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min_t(uint32_t, num, MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue mbufs from rx_q */
	ret = kni_fifo_get(kni->rx_q, kni->pa, num);
	if (ret == 0)
		return;

	/* Copy mbufs to sk buffer and then call tx interface */
	for (i = 0; i < num; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->pkt_len;
		data_kva = kva2data_kva(kva);
		kni->va[i] = pa2va(kni->pa[i], kva);

		skb = dev_alloc_skb(len + 2);
		if (skb) {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev_kfree_skb(skb);
		}

		/* Simulate real usage: allocate/copy the skb twice */
		skb = dev_alloc_skb(len + 2);
		if (skb == NULL) {
			kni->stats.rx_dropped++;
			continue;
		}

		/* Align IP on 16B boundary */
		skb_reserve(skb, 2);

		if (kva->nb_segs == 1) {
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->dev = dev;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		kni->stats.rx_bytes += len;
		kni->stats.rx_packets++;

		/* call tx interface */
		kni_net_tx(skb, dev);
	}

	/* enqueue all the mbufs from rx_q into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num);
	if (ret != num)
		/* Failing should not happen */
		pr_err("Fail to enqueue mbufs into free_q\n");
}

/* rx interface */
void
kni_net_rx(struct kni_dev *kni)
{
	/**
	 * No need to check for a NULL pointer here: kni_net_rx_func
	 * always has a value, assigned once by kni_net_config_lo_mode()
	 * and never changed afterwards.
	 */
	(*kni_net_rx_func)(kni);
}

/*
 * Deal with a transmit timeout.
 */
static void
kni_net_tx_timeout(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies,
			jiffies - dev_trans_start(dev));

	kni->stats.tx_errors++;
	netif_wake_queue(dev);
}

/*
 * Ioctl commands
 */
static int
kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	pr_debug("kni_net_ioctl group:%d cmd:%d\n",
		((struct kni_dev *)netdev_priv(dev))->group_id, cmd);

	return -EOPNOTSUPP;
}

static void
kni_net_set_rx_mode(struct net_device *dev)
{
}

static int
kni_net_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	pr_debug("kni_net_change_mtu new mtu %d to be set\n", new_mtu);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MTU;
	req.new_mtu = new_mtu;
	ret = kni_net_process_request(kni, &req);
	if (ret == 0 && req.result == 0)
		dev->mtu = new_mtu;

	return (ret == 0) ? req.result : ret;
}

static void
kni_net_set_promiscusity(struct net_device *netdev, int flags)
{
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(netdev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_PROMISC;

	if (netdev->flags & IFF_PROMISC)
		req.promiscusity = 1;
	else
		req.promiscusity = 0;
	kni_net_process_request(kni, &req);
}

/*
 * Check if the userspace application has provided the response message
 */
void
kni_net_poll_resp(struct kni_dev *kni)
{
	if (kni_fifo_count(kni->resp_q))
		wake_up_interruptible(&kni->wq);
}

/*
 * Return statistics to the caller
 */
static struct net_device_stats *
kni_net_stats(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	return &kni->stats;
}

/*
 * Fill the eth header
 */
static int
kni_net_header(struct sk_buff *skb, struct net_device *dev,
		unsigned short type, const void *daddr,
		const void *saddr, uint32_t len)
{
	struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

	memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
	eth->h_proto = htons(type);

	return dev->hard_header_len;
}

/*
 * Re-fill the eth header
 */
#ifdef HAVE_REBUILD_HEADER
static int
kni_net_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct ethhdr *eth = (struct ethhdr *) skb->data;

	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);

	return 0;
}
#endif /* < 4.1.0 */

/**
 * kni_net_set_mac - Change the Ethernet Address of the KNI NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int
kni_net_set_mac(struct net_device *netdev, void *p)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni;
	struct sockaddr *addr = p;

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MAC_ADDR;

	if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
		return -EADDRNOTAVAIL;

	memcpy(req.mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	kni = netdev_priv(netdev);
	ret = kni_net_process_request(kni, &req);

	return (ret == 0 ? req.result : ret);
}

#ifdef HAVE_CHANGE_CARRIER_CB
static int
kni_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
#endif

static const struct header_ops kni_net_header_ops = {
	.create = kni_net_header,
	.parse = eth_header_parse,
#ifdef HAVE_REBUILD_HEADER
	.rebuild = kni_net_rebuild_header,
#endif /* < 4.1.0 */
	.cache = NULL,  /* disable caching */
};

static const struct net_device_ops kni_net_netdev_ops = {
	.ndo_open = kni_net_open,
	.ndo_stop = kni_net_release,
	.ndo_set_config = kni_net_config,
	.ndo_change_rx_flags = kni_net_set_promiscusity,
	.ndo_start_xmit = kni_net_tx,
	.ndo_change_mtu = kni_net_change_mtu,
	.ndo_do_ioctl = kni_net_ioctl,
	.ndo_set_rx_mode = kni_net_set_rx_mode,
	.ndo_get_stats = kni_net_stats,
	.ndo_tx_timeout = kni_net_tx_timeout,
	.ndo_set_mac_address = kni_net_set_mac,
#ifdef HAVE_CHANGE_CARRIER_CB
	.ndo_change_carrier = kni_net_change_carrier,
#endif
};

void
kni_net_init(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	init_waitqueue_head(&kni->wq);
	mutex_init(&kni->sync_lock);

	ether_setup(dev); /* assign some of the fields */
	dev->netdev_ops = &kni_net_netdev_ops;
	dev->header_ops = &kni_net_header_ops;
	dev->watchdog_timeo = WD_TIMEOUT;
}

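/*
 * Select the RX loopback mode by name. The string comes from the
 * module's "lo_mode" parameter (defined elsewhere in this module).
 * A minimal usage sketch, assuming the module is built as rte_kni.ko:
 *
 *	insmod rte_kni.ko lo_mode=lo_mode_fifo
 */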
void
kni_net_config_lo_mode(char *lo_str)
{
	if (!lo_str) {
		pr_debug("loopback disabled");
		return;
	}

	if (!strcmp(lo_str, "lo_mode_none"))
		pr_debug("loopback disabled");
	else if (!strcmp(lo_str, "lo_mode_fifo")) {
		pr_debug("loopback mode=lo_mode_fifo enabled");
		kni_net_rx_func = kni_net_rx_lo_fifo;
	} else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
		pr_debug("loopback mode=lo_mode_fifo_skb enabled");
		kni_net_rx_func = kni_net_rx_lo_fifo_skb;
	} else
		pr_debug("Unknown loopback parameter, loopback disabled");
}