]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /*- |
2 | * GPL LICENSE SUMMARY | |
3 | * | |
4 | * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of version 2 of the GNU General Public License as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | * The full GNU General Public License is included in this distribution | |
19 | * in the file called LICENSE.GPL. | |
20 | * | |
21 | * Contact Information: | |
22 | * Intel Corporation | |
23 | */ | |
24 | ||
25 | /* | |
26 | * This code is inspired from the book "Linux Device Drivers" by | |
27 | * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates | |
28 | */ | |
29 | ||
30 | #include <linux/device.h> | |
31 | #include <linux/module.h> | |
32 | #include <linux/version.h> | |
33 | #include <linux/netdevice.h> | |
34 | #include <linux/etherdevice.h> /* eth_type_trans */ | |
35 | #include <linux/skbuff.h> | |
36 | #include <linux/kthread.h> | |
37 | #include <linux/delay.h> | |
38 | ||
39 | #include <exec-env/rte_kni_common.h> | |
40 | #include <kni_fifo.h> | |
41 | ||
42 | #include "compat.h" | |
43 | #include "kni_dev.h" | |
44 | ||
/* Netdev watchdog timeout handed to dev->watchdog_timeo */
#define WD_TIMEOUT 5 /*jiffies */

/* NOTE(review): appears unused in this file — kni_net_process_request
 * waits 3 * HZ directly; confirm before removing */
#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */

/* typedef for rx function */
typedef void (*kni_net_rx_t)(struct kni_dev *kni);

static void kni_net_rx_normal(struct kni_dev *kni);

/* kni rx function pointer, with default to normal rx; may be switched
 * to a loopback handler by kni_net_config_lo_mode() at module load */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
56 | ||
/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
{
	unsigned long paddr = (unsigned long)pa;

	return phys_to_virt(paddr);
}
63 | ||
64 | /* physical address to virtual address */ | |
65 | static void * | |
66 | pa2va(void *pa, struct rte_kni_mbuf *m) | |
67 | { | |
68 | void *va; | |
69 | ||
70 | va = (void *)((unsigned long)pa + | |
71 | (unsigned long)m->buf_addr - | |
72 | (unsigned long)m->buf_physaddr); | |
73 | return va; | |
74 | } | |
75 | ||
/* mbuf data kernel virtual address from mbuf kernel virtual address;
 * maps the physical address of the payload (buffer base + data_off) */
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
	return phys_to_virt(m->buf_physaddr + m->data_off);
}
82 | ||
83 | /* virtual address to physical address */ | |
84 | static void * | |
85 | va2pa(void *va, struct rte_kni_mbuf *m) | |
86 | { | |
87 | void *pa; | |
88 | ||
89 | pa = (void *)((unsigned long)va - | |
90 | ((unsigned long)m->buf_addr - | |
91 | (unsigned long)m->buf_physaddr)); | |
92 | return pa; | |
93 | } | |
94 | ||
/*
 * Send a request to the user space application through req_q and wait
 * (up to 3 s) for the matching response on resp_q.  Serialized by
 * kni->sync_lock.  The payload travels through the shared sync buffer:
 * sync_kva is its kernel mapping, sync_va presumably the user space
 * address of the same buffer (the fifos carry the user space pointer) —
 * TODO confirm against kni_dev setup.
 *
 * Returns 0 on success; -EINVAL, -EBUSY, -ETIME or -ENODATA on failure.
 */
static int
kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
{
	int ret = -1;
	void *resp_va;
	uint32_t num;
	int ret_val;

	if (!kni || !req) {
		pr_err("No kni instance or request\n");
		return -EINVAL;
	}

	mutex_lock(&kni->sync_lock);

	/* Construct data: copy the request into the shared buffer and
	 * publish its address on req_q */
	memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
	num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
	if (num < 1) {
		pr_err("Cannot send to req_q\n");
		ret = -EBUSY;
		goto fail;
	}

	/* Block until kni_net_poll_resp() wakes us, or 3 s elapse */
	ret_val = wait_event_interruptible_timeout(kni->wq,
			kni_fifo_count(kni->resp_q), 3 * HZ);
	if (signal_pending(current) || ret_val <= 0) {
		/* interrupted by a signal, or timed out */
		ret = -ETIME;
		goto fail;
	}
	num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
	if (num != 1 || resp_va != kni->sync_va) {
		/* This should never happen */
		pr_err("No data in resp_q\n");
		ret = -ENODATA;
		goto fail;
	}

	/* Copy the response (result code, etc.) back to the caller */
	memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
	ret = 0;

fail:
	mutex_unlock(&kni->sync_lock);
	return ret;
}
143 | ||
144 | /* | |
145 | * Open and close | |
146 | */ | |
147 | static int | |
148 | kni_net_open(struct net_device *dev) | |
149 | { | |
150 | int ret; | |
151 | struct rte_kni_request req; | |
152 | struct kni_dev *kni = netdev_priv(dev); | |
153 | ||
154 | netif_start_queue(dev); | |
155 | ||
156 | memset(&req, 0, sizeof(req)); | |
157 | req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF; | |
158 | ||
159 | /* Setting if_up to non-zero means up */ | |
160 | req.if_up = 1; | |
161 | ret = kni_net_process_request(kni, &req); | |
162 | ||
163 | return (ret == 0) ? req.result : ret; | |
164 | } | |
165 | ||
166 | static int | |
167 | kni_net_release(struct net_device *dev) | |
168 | { | |
169 | int ret; | |
170 | struct rte_kni_request req; | |
171 | struct kni_dev *kni = netdev_priv(dev); | |
172 | ||
173 | netif_stop_queue(dev); /* can't transmit any more */ | |
174 | ||
175 | memset(&req, 0, sizeof(req)); | |
176 | req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF; | |
177 | ||
178 | /* Setting if_up to 0 means down */ | |
179 | req.if_up = 0; | |
180 | ret = kni_net_process_request(kni, &req); | |
181 | ||
182 | return (ret == 0) ? req.result : ret; | |
183 | } | |
184 | ||
185 | /* | |
186 | * Configuration changes (passed on by ifconfig) | |
187 | */ | |
188 | static int | |
189 | kni_net_config(struct net_device *dev, struct ifmap *map) | |
190 | { | |
191 | if (dev->flags & IFF_UP) /* can't act on a running interface */ | |
192 | return -EBUSY; | |
193 | ||
194 | /* ignore other fields */ | |
195 | return 0; | |
196 | } | |
197 | ||
/*
 * Transmit a packet (called by the kernel)
 */
#ifdef RTE_KNI_VHOST
/*
 * vhost build: this netdev xmit path is not used for data, so any skb
 * handed to it is dropped and counted as tx_dropped.
 */
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#else
/*
 * Copy the skb payload into an mbuf dequeued from alloc_q and pass it
 * to the user space application via tx_q.  The skb is always consumed
 * and NETDEV_TX_OK returned, whether the packet is sent or dropped.
 */
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	uint32_t ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	void *pkt_pa = NULL;
	void *pkt_va = NULL;

	/* save the timestamp */
#ifdef HAVE_TRANS_START_HELPER
	netif_trans_update(dev);
#else
	dev->trans_start = jiffies;
#endif

	/* Check if the length of skb is less than mbuf size */
	if (skb->len > kni->mbuf_size)
		goto drop;

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = pa2kva(pkt_pa);
		data_kva = kva2data_kva(pkt_kva);
		pkt_va = pa2va(pkt_pa, pkt_kva);

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		/* Pad runt frames up to the Ethernet minimum length */
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			pr_err("Fail to enqueue mbuf into tx_q\n");
			/* NOTE(review): the mbuf dequeued from alloc_q is
			 * not returned on this path — possible leak if this
			 * "impossible" failure ever occurs; verify */
			goto drop;
		}
	} else {
		/* Failing should not happen */
		pr_err("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#endif
293 | ||
/*
 * RX: normal working mode.
 * Burst-dequeue mbufs from rx_q, copy each (possibly multi-segment)
 * packet into a freshly allocated skb, hand it to the network stack,
 * and return the mbufs to user space via free_q.
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num_rx, num_fq;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);
	if (num_fq == 0) {
		/* No room on the free_q, bail out */
		return;
	}

	/* Calculate the number of entries to dequeue from rx_q;
	 * never dequeue more than can be given back through free_q */
	num_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ);

	/* Burst dequeue from rx_q */
	num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);
	if (num_rx == 0)
		return;

	/* Transfer received packets to netif */
	for (i = 0; i < num_rx; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->pkt_len;
		data_kva = kva2data_kva(kva);
		/* remember the user space va so the mbuf can be freed below */
		kni->va[i] = pa2va(kni->pa[i], kva);

		skb = dev_alloc_skb(len + 2);
		if (!skb) {
			/* Update statistics */
			kni->stats.rx_dropped++;
			continue;
		}

		/* Align IP on 16B boundary */
		skb_reserve(skb, 2);

		if (kva->nb_segs == 1) {
			/* single segment: one copy of the whole packet */
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			/* multi-segment: walk the kva->next chain, copying
			 * data_len bytes per segment into the same skb */
			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				/* kva->next holds a user space va; convert
				 * to pa, then to a kernel va */
				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Call netif interface */
		netif_rx_ni(skb);

		/* Update statistics */
		kni->stats.rx_bytes += len;
		kni->stats.rx_packets++;
	}

	/* Burst enqueue mbufs into free_q (space was reserved above) */
	ret = kni_fifo_put(kni->free_q, kni->va, num_rx);
	if (ret != num_rx)
		/* Failing should not happen */
		pr_err("Fail to enqueue entries into free_q\n");
}
376 | ||
377 | /* | |
378 | * RX: loopback with enqueue/dequeue fifos. | |
379 | */ | |
380 | static void | |
381 | kni_net_rx_lo_fifo(struct kni_dev *kni) | |
382 | { | |
383 | uint32_t ret; | |
384 | uint32_t len; | |
385 | uint32_t i, num, num_rq, num_tq, num_aq, num_fq; | |
386 | struct rte_kni_mbuf *kva; | |
387 | void *data_kva; | |
388 | struct rte_kni_mbuf *alloc_kva; | |
389 | void *alloc_data_kva; | |
390 | ||
391 | /* Get the number of entries in rx_q */ | |
392 | num_rq = kni_fifo_count(kni->rx_q); | |
393 | ||
394 | /* Get the number of free entrie in tx_q */ | |
395 | num_tq = kni_fifo_free_count(kni->tx_q); | |
396 | ||
397 | /* Get the number of entries in alloc_q */ | |
398 | num_aq = kni_fifo_count(kni->alloc_q); | |
399 | ||
400 | /* Get the number of free entries in free_q */ | |
401 | num_fq = kni_fifo_free_count(kni->free_q); | |
402 | ||
403 | /* Calculate the number of entries to be dequeued from rx_q */ | |
404 | num = min(num_rq, num_tq); | |
405 | num = min(num, num_aq); | |
406 | num = min(num, num_fq); | |
407 | num = min_t(uint32_t, num, MBUF_BURST_SZ); | |
408 | ||
409 | /* Return if no entry to dequeue from rx_q */ | |
410 | if (num == 0) | |
411 | return; | |
412 | ||
413 | /* Burst dequeue from rx_q */ | |
414 | ret = kni_fifo_get(kni->rx_q, kni->pa, num); | |
415 | if (ret == 0) | |
416 | return; /* Failing should not happen */ | |
417 | ||
418 | /* Dequeue entries from alloc_q */ | |
419 | ret = kni_fifo_get(kni->alloc_q, kni->alloc_pa, num); | |
420 | if (ret) { | |
421 | num = ret; | |
422 | /* Copy mbufs */ | |
423 | for (i = 0; i < num; i++) { | |
424 | kva = pa2kva(kni->pa[i]); | |
425 | len = kva->pkt_len; | |
426 | data_kva = kva2data_kva(kva); | |
427 | kni->va[i] = pa2va(kni->pa[i], kva); | |
428 | ||
429 | alloc_kva = pa2kva(kni->alloc_pa[i]); | |
430 | alloc_data_kva = kva2data_kva(alloc_kva); | |
431 | kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva); | |
432 | ||
433 | memcpy(alloc_data_kva, data_kva, len); | |
434 | alloc_kva->pkt_len = len; | |
435 | alloc_kva->data_len = len; | |
436 | ||
437 | kni->stats.tx_bytes += len; | |
438 | kni->stats.rx_bytes += len; | |
439 | } | |
440 | ||
441 | /* Burst enqueue mbufs into tx_q */ | |
442 | ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num); | |
443 | if (ret != num) | |
444 | /* Failing should not happen */ | |
445 | pr_err("Fail to enqueue mbufs into tx_q\n"); | |
446 | } | |
447 | ||
448 | /* Burst enqueue mbufs into free_q */ | |
449 | ret = kni_fifo_put(kni->free_q, kni->va, num); | |
450 | if (ret != num) | |
451 | /* Failing should not happen */ | |
452 | pr_err("Fail to enqueue mbufs into free_q\n"); | |
453 | ||
454 | /** | |
455 | * Update statistic, and enqueue/dequeue failure is impossible, | |
456 | * as all queues are checked at first. | |
457 | */ | |
458 | kni->stats.tx_packets += num; | |
459 | kni->stats.rx_packets += num; | |
460 | } | |
461 | ||
/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 * Each packet from rx_q is copied into an skb (twice, to exercise the
 * allocator) and then pushed back out through kni_net_tx(); the rx
 * mbufs are returned via free_q.  This is a benchmarking mode.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num_rq, num_fq, num;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min_t(uint32_t, num, MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue mbufs from rx_q */
	ret = kni_fifo_get(kni->rx_q, kni->pa, num);
	if (ret == 0)
		return;

	/* Copy mbufs to sk buffer and then call tx interface */
	for (i = 0; i < num; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->pkt_len;
		data_kva = kva2data_kva(kva);
		/* remember the user space va so the mbuf can be freed below */
		kni->va[i] = pa2va(kni->pa[i], kva);

		/* First alloc/copy is thrown away immediately — it only
		 * simulates the cost of a real rx path */
		skb = dev_alloc_skb(len + 2);
		if (skb) {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev_kfree_skb(skb);
		}

		/* Simulate real usage, allocate/copy skb twice */
		skb = dev_alloc_skb(len + 2);
		if (skb == NULL) {
			kni->stats.rx_dropped++;
			continue;
		}

		/* Align IP on 16B boundary */
		skb_reserve(skb, 2);

		if (kva->nb_segs == 1) {
			/* single segment: one copy of the whole packet */
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			/* multi-segment: walk the kva->next chain, copying
			 * data_len bytes per segment into the same skb */
			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				/* kva->next holds a user space va; convert
				 * to pa, then to a kernel va */
				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->dev = dev;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		kni->stats.rx_bytes += len;
		kni->stats.rx_packets++;

		/* call tx interface; kni_net_tx always consumes the skb */
		kni_net_tx(skb, dev);
	}

	/* enqueue all the mbufs from rx_q into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num);
	if (ret != num)
		/* Failing should not happen */
		pr_err("Fail to enqueue mbufs into free_q\n");
}
556 | ||
/* rx interface */
void
kni_net_rx(struct kni_dev *kni)
{
	/*
	 * kni_net_rx_func always points at a valid handler
	 * (kni_net_rx_normal by default), so no NULL check is needed.
	 */
	kni_net_rx_func(kni);
}
567 | ||
568 | /* | |
569 | * Deal with a transmit timeout. | |
570 | */ | |
571 | static void | |
572 | kni_net_tx_timeout(struct net_device *dev) | |
573 | { | |
574 | struct kni_dev *kni = netdev_priv(dev); | |
575 | ||
576 | pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies, | |
577 | jiffies - dev_trans_start(dev)); | |
578 | ||
579 | kni->stats.tx_errors++; | |
580 | netif_wake_queue(dev); | |
581 | } | |
582 | ||
583 | /* | |
584 | * Ioctl commands | |
585 | */ | |
586 | static int | |
587 | kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
588 | { | |
589 | pr_debug("kni_net_ioctl group:%d cmd:%d\n", | |
590 | ((struct kni_dev *)netdev_priv(dev))->group_id, cmd); | |
591 | ||
592 | return 0; | |
593 | } | |
594 | ||
/* No-op: rx-mode changes (promiscuity, multicast list) are accepted
 * but not propagated anywhere */
static void
kni_net_set_rx_mode(struct net_device *dev)
{
}
599 | ||
600 | static int | |
601 | kni_net_change_mtu(struct net_device *dev, int new_mtu) | |
602 | { | |
603 | int ret; | |
604 | struct rte_kni_request req; | |
605 | struct kni_dev *kni = netdev_priv(dev); | |
606 | ||
607 | pr_debug("kni_net_change_mtu new mtu %d to be set\n", new_mtu); | |
608 | ||
609 | memset(&req, 0, sizeof(req)); | |
610 | req.req_id = RTE_KNI_REQ_CHANGE_MTU; | |
611 | req.new_mtu = new_mtu; | |
612 | ret = kni_net_process_request(kni, &req); | |
613 | if (ret == 0 && req.result == 0) | |
614 | dev->mtu = new_mtu; | |
615 | ||
616 | return (ret == 0) ? req.result : ret; | |
617 | } | |
618 | ||
619 | /* | |
620 | * Checks if the user space application provided the resp message | |
621 | */ | |
622 | void | |
623 | kni_net_poll_resp(struct kni_dev *kni) | |
624 | { | |
625 | if (kni_fifo_count(kni->resp_q)) | |
626 | wake_up_interruptible(&kni->wq); | |
627 | } | |
628 | ||
629 | /* | |
630 | * Return statistics to the caller | |
631 | */ | |
632 | static struct net_device_stats * | |
633 | kni_net_stats(struct net_device *dev) | |
634 | { | |
635 | struct kni_dev *kni = netdev_priv(dev); | |
636 | ||
637 | return &kni->stats; | |
638 | } | |
639 | ||
640 | /* | |
641 | * Fill the eth header | |
642 | */ | |
643 | static int | |
644 | kni_net_header(struct sk_buff *skb, struct net_device *dev, | |
645 | unsigned short type, const void *daddr, | |
646 | const void *saddr, uint32_t len) | |
647 | { | |
648 | struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); | |
649 | ||
650 | memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len); | |
651 | memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len); | |
652 | eth->h_proto = htons(type); | |
653 | ||
654 | return dev->hard_header_len; | |
655 | } | |
656 | ||
/*
 * Re-fill the eth header
 */
#ifdef HAVE_REBUILD_HEADER
/* Only compiled for kernels that still have header_ops->rebuild */
static int
kni_net_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct ethhdr *eth = (struct ethhdr *) skb->data;

	/* NOTE(review): both source AND dest are set to the device's own
	 * address here — presumably the dest is rewritten elsewhere;
	 * confirm against callers of rebuild */
	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);

	return 0;
}
#endif /* < 4.1.0 */
673 | ||
674 | /** | |
675 | * kni_net_set_mac - Change the Ethernet Address of the KNI NIC | |
676 | * @netdev: network interface device structure | |
677 | * @p: pointer to an address structure | |
678 | * | |
679 | * Returns 0 on success, negative on failure | |
680 | **/ | |
681 | static int | |
682 | kni_net_set_mac(struct net_device *netdev, void *p) | |
683 | { | |
684 | struct sockaddr *addr = p; | |
685 | ||
686 | if (!is_valid_ether_addr((unsigned char *)(addr->sa_data))) | |
687 | return -EADDRNOTAVAIL; | |
688 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
689 | return 0; | |
690 | } | |
691 | ||
#ifdef HAVE_CHANGE_CARRIER_CB
/* Toggle the netdev carrier state as requested (e.g. via sysfs) */
static int
kni_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
#endif
703 | ||
/* Ethernet header construction ops installed in kni_net_init() */
static const struct header_ops kni_net_header_ops = {
	.create = kni_net_header,
#ifdef HAVE_REBUILD_HEADER
	.rebuild = kni_net_rebuild_header,
#endif /* < 4.1.0 */
	.cache = NULL,  /* disable caching */
};
711 | ||
/* Netdev callbacks installed in kni_net_init() */
static const struct net_device_ops kni_net_netdev_ops = {
	.ndo_open = kni_net_open,
	.ndo_stop = kni_net_release,
	.ndo_set_config = kni_net_config,
	.ndo_start_xmit = kni_net_tx,
	.ndo_change_mtu = kni_net_change_mtu,
	.ndo_do_ioctl = kni_net_ioctl,
	.ndo_set_rx_mode = kni_net_set_rx_mode,
	.ndo_get_stats = kni_net_stats,
	.ndo_tx_timeout = kni_net_tx_timeout,
	.ndo_set_mac_address = kni_net_set_mac,
#ifdef HAVE_CHANGE_CARRIER_CB
	.ndo_change_carrier = kni_net_change_carrier,
#endif
};
727 | ||
728 | void | |
729 | kni_net_init(struct net_device *dev) | |
730 | { | |
731 | struct kni_dev *kni = netdev_priv(dev); | |
732 | ||
733 | init_waitqueue_head(&kni->wq); | |
734 | mutex_init(&kni->sync_lock); | |
735 | ||
736 | ether_setup(dev); /* assign some of the fields */ | |
737 | dev->netdev_ops = &kni_net_netdev_ops; | |
738 | dev->header_ops = &kni_net_header_ops; | |
739 | dev->watchdog_timeo = WD_TIMEOUT; | |
740 | } | |
741 | ||
742 | void | |
743 | kni_net_config_lo_mode(char *lo_str) | |
744 | { | |
745 | if (!lo_str) { | |
746 | pr_debug("loopback disabled"); | |
747 | return; | |
748 | } | |
749 | ||
750 | if (!strcmp(lo_str, "lo_mode_none")) | |
751 | pr_debug("loopback disabled"); | |
752 | else if (!strcmp(lo_str, "lo_mode_fifo")) { | |
753 | pr_debug("loopback mode=lo_mode_fifo enabled"); | |
754 | kni_net_rx_func = kni_net_rx_lo_fifo; | |
755 | } else if (!strcmp(lo_str, "lo_mode_fifo_skb")) { | |
756 | pr_debug("loopback mode=lo_mode_fifo_skb enabled"); | |
757 | kni_net_rx_func = kni_net_rx_lo_fifo_skb; | |
758 | } else | |
759 | pr_debug("Incognizant parameter, loopback disabled"); | |
760 | } |