drivers/staging/most/aim-network/networking.c
/*
 * Networking AIM - Networking Application Interface Module for MostCore
 *
 * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include "mostcore.h"
#include "networking.h"

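/*
 * Framing constants.  MEP_HDR_LEN and MDP_HDR_LEN are the header sizes
 * built by skb_to_mep() and skb_to_mamac() below, MAMAC_DATA_LEN caps the
 * payload of a 1 kB MDP telegram, and the PMS_* values fill the port
 * message header fields (FIFO number, telegram id, priority, retries).
 * MEP and MDP are read here as MOST Ethernet Packet and MOST Data Packet.
 */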
#define MEP_HDR_LEN	8
#define MDP_HDR_LEN	16
#define MAMAC_DATA_LEN	(1024 - MDP_HDR_LEN)

#define PMHL 5

#define PMS_TELID_UNSEGM_MAMAC	0x0A
#define PMS_FIFONO_MDP		0x01
#define PMS_FIFONO_MEP		0x04
#define PMS_MSGTYPE_DATA	0x04
#define PMS_DEF_PRIO		0
#define MEP_DEF_RETRY		15

#define PMS_FIFONO_MASK		0x07
#define PMS_FIFONO_SHIFT	3
#define PMS_RETRY_SHIFT		4
#define PMS_TELID_MASK		0x0F
#define PMS_TELID_SHIFT		4

#define HB(value)		((u8)((u16)(value) >> 8))
#define LB(value)		((u8)(value))

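/*
 * Helpers to classify a received buffer: EXTRACT_BIT_SET() pulls a bit
 * field out of a header byte, PMS_IS_MEP() matches buffers addressed to
 * the MEP FIFO, and PMS_IS_MAMAC() matches unsegmented MAMAC telegrams
 * on the MDP FIFO.
 */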
#define EXTRACT_BIT_SET(bitset_name, value) \
	(((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)

#define PMS_IS_MEP(buf, len) \
	((len) > MEP_HDR_LEN && \
	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)

#define PMS_IS_MAMAC(buf, len) \
	((len) > MDP_HDR_LEN && \
	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
	 EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)

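/*
 * Per-interface state: one net_dev_context is kept per MOST interface.
 * It tracks the linked RX/TX channels and the net_device that is
 * registered once the second channel direction is linked.
 */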
struct net_dev_channel {
	bool linked;
	int ch_id;
};

struct net_dev_context {
	struct most_interface *iface;
	bool channels_opened;
	bool is_mamac;
	unsigned char link_stat;
	struct net_device *dev;
	struct net_dev_channel rx;
	struct net_dev_channel tx;
	struct list_head list;
};

static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
static spinlock_t list_lock;
static struct most_aim aim;

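/*
 * skb_to_mamac - encapsulate an outgoing Ethernet frame as an unsegmented
 * MAMAC telegram in the given MBO.  The Ethernet header is stripped; only
 * the last two bytes of the destination address (or the 0x03FF broadcast
 * address), the EtherType and the payload are carried in the MDP header
 * and body.
 */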
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
{
	u8 *buff = mbo->virt_address;
	const u8 broadcast[] = { 0x03, 0xFF };
	const u8 *dest_addr = skb->data + 4;
	const u8 *eth_type = skb->data + 12;
	unsigned int payload_len = skb->len - ETH_HLEN;
	unsigned int mdp_len = payload_len + MDP_HDR_LEN;

	/* reject runt frames first: payload_len above underflows otherwise */
	if (skb->len < ETH_HLEN) {
		pr_err("drop: too small packet! (%d)\n", skb->len);
		return -EINVAL;
	}

	if (mbo->buffer_length < mdp_len) {
		pr_err("drop: too small buffer! (%d for %d)\n",
		       mbo->buffer_length, mdp_len);
		return -EINVAL;
	}

	if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
		dest_addr = broadcast;

	*buff++ = HB(mdp_len - 2);
	*buff++ = LB(mdp_len - 2);

	*buff++ = PMHL;
	*buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
	*buff++ = PMS_DEF_PRIO;
	*buff++ = dest_addr[0];
	*buff++ = dest_addr[1];
	*buff++ = 0x00;

	*buff++ = HB(payload_len + 6);
	*buff++ = LB(payload_len + 6);

	/* end of FPH here */

	*buff++ = eth_type[0];
	*buff++ = eth_type[1];
	*buff++ = 0;
	*buff++ = 0;

	*buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
	*buff++ = LB(payload_len);

	memcpy(buff, skb->data + ETH_HLEN, payload_len);
	mbo->buffer_length = mdp_len;
	return 0;
}

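/*
 * skb_to_mep - wrap an outgoing Ethernet frame, unchanged, into a MEP
 * telegram in the given MBO.  Only the fixed 8-byte MEP header is
 * prepended; the frame itself is copied verbatim.
 */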
static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
{
	u8 *buff = mbo->virt_address;
	unsigned int mep_len = skb->len + MEP_HDR_LEN;

	if (mbo->buffer_length < mep_len) {
		pr_err("drop: too small buffer! (%d for %d)\n",
		       mbo->buffer_length, mep_len);
		return -EINVAL;
	}

	*buff++ = HB(mep_len - 2);
	*buff++ = LB(mep_len - 2);

	*buff++ = PMHL;
	*buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
	*buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
	*buff++ = 0;
	*buff++ = 0;
	*buff++ = 0;

	memcpy(buff, skb->data, skb->len);
	mbo->buffer_length = mep_len;
	return 0;
}

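/*
 * most_nd_set_mac_address - ndo_set_mac_address hook.  A MAC address whose
 * first four octets are zero selects MAMAC framing (is_mamac = true); the
 * default MTU is switched accordingly and may still be changed later.
 */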
static int most_nd_set_mac_address(struct net_device *dev, void *p)
{
	struct net_dev_context *nd = dev->ml_priv;
	int err = eth_mac_addr(dev, p);

	if (err)
		return err;

	BUG_ON(nd->dev != dev);

	nd->is_mamac =
		(dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
		 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);

	/*
	 * Set default MTU for the given packet type.
	 * It is still possible to change MTU using ip tools afterwards.
	 */
	dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;

	return 0;
}

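/*
 * most_nd_open - ndo_open hook: start the linked RX and TX channels.  For
 * MAMAC the link is assumed up immediately; for MEP the queue stays
 * stopped until the HDM reports the link state via most_deliver_netinfo().
 */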
static int most_nd_open(struct net_device *dev)
{
	struct net_dev_context *nd = dev->ml_priv;

	pr_info("open net device %s\n", dev->name);

	BUG_ON(nd->dev != dev);

	if (nd->channels_opened)
		return -EFAULT;

	BUG_ON(!nd->tx.linked || !nd->rx.linked);

	if (most_start_channel(nd->iface, nd->rx.ch_id)) {
		pr_err("most_start_channel() failed\n");
		return -EBUSY;
	}

	if (most_start_channel(nd->iface, nd->tx.ch_id)) {
		pr_err("most_start_channel() failed\n");
		most_stop_channel(nd->iface, nd->rx.ch_id);
		return -EBUSY;
	}

	nd->channels_opened = true;

	if (nd->is_mamac) {
		nd->link_stat = 1;
		netif_wake_queue(dev);
	} else {
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
	}

	return 0;
}

static int most_nd_stop(struct net_device *dev)
{
	struct net_dev_context *nd = dev->ml_priv;

	pr_info("stop net device %s\n", dev->name);

	BUG_ON(nd->dev != dev);
	netif_stop_queue(dev);

	if (nd->channels_opened) {
		most_stop_channel(nd->iface, nd->rx.ch_id);
		most_stop_channel(nd->iface, nd->tx.ch_id);
		nd->channels_opened = false;
	}

	return 0;
}

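/*
 * most_nd_start_xmit - ndo_start_xmit hook: fetch a free MBO for the TX
 * channel, encapsulate the skb as MAMAC or MEP, and submit it.  When no
 * MBO is available the queue is stopped and NETDEV_TX_BUSY is returned;
 * the queue is woken again from the tx_completion callback.
 */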
static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net_dev_context *nd = dev->ml_priv;
	struct mbo *mbo;
	int ret;

	BUG_ON(nd->dev != dev);

	mbo = most_get_mbo(nd->iface, nd->tx.ch_id);

	if (!mbo) {
		netif_stop_queue(dev);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	if (nd->is_mamac)
		ret = skb_to_mamac(skb, mbo);
	else
		ret = skb_to_mep(skb, mbo);

	if (ret) {
		most_put_mbo(mbo);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	most_submit_mbo(mbo);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops most_nd_ops = {
	.ndo_open = most_nd_open,
	.ndo_stop = most_nd_stop,
	.ndo_start_xmit = most_nd_start_xmit,
	.ndo_set_mac_address = most_nd_set_mac_address,
};

static void most_nd_setup(struct net_device *dev)
{
	pr_info("setup net device %s\n", dev->name);
	ether_setup(dev);
	dev->netdev_ops = &most_nd_ops;
}

static void most_net_rm_netdev_safe(struct net_dev_context *nd)
{
	if (!nd->dev)
		return;

	pr_info("remove net device %p\n", nd->dev);

	unregister_netdev(nd->dev);
	free_netdev(nd->dev);
	nd->dev = NULL;
}

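/*
 * get_net_dev_context - look up the net_dev_context registered for a MOST
 * interface, or return NULL if none exists.  The lookup walks net_devices
 * under list_lock.
 */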
static struct net_dev_context *get_net_dev_context(
	struct most_interface *iface)
{
	struct net_dev_context *nd, *tmp;

	spin_lock(&list_lock);
	list_for_each_entry_safe(nd, tmp, &net_devices, list) {
		if (nd->iface == iface) {
			spin_unlock(&list_lock);
			return nd;
		}
	}
	spin_unlock(&list_lock);
	return NULL;
}

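/*
 * aim_probe_channel - AIM probe callback: accept only asynchronous
 * channels, create the per-interface context on first use, and register
 * the "meth%d" net_device while the second channel direction is being
 * linked.
 */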
static int aim_probe_channel(struct most_interface *iface, int channel_idx,
			     struct most_channel_config *ccfg,
			     struct kobject *parent, char *name)
{
	struct net_dev_context *nd;
	struct net_dev_channel *ch;

	if (!iface)
		return -EINVAL;

	if (ccfg->data_type != MOST_CH_ASYNC)
		return -EINVAL;

	nd = get_net_dev_context(iface);

	if (!nd) {
		nd = kzalloc(sizeof(*nd), GFP_KERNEL);
		if (!nd)
			return -ENOMEM;

		nd->iface = iface;

		spin_lock(&list_lock);
		list_add(&nd->list, &net_devices);
		spin_unlock(&list_lock);
	}

	ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
	if (ch->linked) {
		pr_err("only one channel per instance & direction allowed\n");
		return -EINVAL;
	}

	if (nd->tx.linked || nd->rx.linked) {
		struct net_device *dev =
			alloc_netdev(0, "meth%d", NET_NAME_UNKNOWN, most_nd_setup);

		if (!dev) {
			pr_err("no memory for net_device\n");
			return -ENOMEM;
		}

		nd->dev = dev;

		dev->ml_priv = nd;
		if (register_netdev(dev)) {
			pr_err("registering net device failed\n");
			free_netdev(dev);
			/* do not leave a dangling pointer to the freed dev */
			nd->dev = NULL;
			return -EINVAL;
		}
	}

	ch->ch_id = channel_idx;
	ch->linked = true;

	return 0;
}

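/*
 * aim_disconnect_channel - AIM disconnect callback: unlink the channel,
 * tear down the net_device, and drop the per-interface context once
 * neither direction is linked any more.
 */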
static int aim_disconnect_channel(struct most_interface *iface,
				  int channel_idx)
{
	struct net_dev_context *nd;
	struct net_dev_channel *ch;

	nd = get_net_dev_context(iface);
	if (!nd)
		return -EINVAL;

	if (nd->rx.linked && channel_idx == nd->rx.ch_id)
		ch = &nd->rx;
	else if (nd->tx.linked && channel_idx == nd->tx.ch_id)
		ch = &nd->tx;
	else
		return -EINVAL;

	ch->linked = false;

	/*
	 * do not call most_stop_channel() here, because channels are
	 * going to be closed in ndo_stop() after unregister_netdev()
	 */
	most_net_rm_netdev_safe(nd);

	if (!nd->rx.linked && !nd->tx.linked) {
		spin_lock(&list_lock);
		list_del(&nd->list);
		spin_unlock(&list_lock);
		kfree(nd);
	}

	return 0;
}

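/*
 * aim_resume_tx_channel - tx_completion callback: a TX MBO has been
 * returned, so the queue that most_nd_start_xmit() stopped can be woken.
 */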
static int aim_resume_tx_channel(struct most_interface *iface,
				 int channel_idx)
{
	struct net_dev_context *nd;

	nd = get_net_dev_context(iface);
	if (!nd || !nd->channels_opened || nd->tx.ch_id != channel_idx)
		return 0;

	if (!nd->dev)
		return 0;

	netif_wake_queue(nd->dev);
	return 0;
}

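/*
 * aim_rx_data - rx_completion callback: validate the received buffer
 * against the configured framing (MAMAC or MEP), rebuild an Ethernet
 * header for MAMAC telegrams, and hand the frame to the stack via
 * netif_rx().  most_put_mbo() is called once the buffer has been consumed
 * or dropped; the early validation failures return -EIO without it.
 */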
static int aim_rx_data(struct mbo *mbo)
{
	const u32 zero = 0;
	struct net_dev_context *nd;
	char *buf = mbo->virt_address;
	u32 len = mbo->processed_length;
	struct sk_buff *skb;
	struct net_device *dev;

	nd = get_net_dev_context(mbo->ifp);
	if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
		return -EIO;

	dev = nd->dev;
	if (!dev) {
		pr_err_once("drop packet: missing net_device\n");
		return -EIO;
	}

	if (nd->is_mamac) {
		if (!PMS_IS_MAMAC(buf, len))
			return -EIO;

		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
	} else {
		if (!PMS_IS_MEP(buf, len))
			return -EIO;

		skb = dev_alloc_skb(len - MEP_HDR_LEN);
	}

	if (!skb) {
		dev->stats.rx_dropped++;
		pr_err_once("drop packet: no memory for skb\n");
		goto out;
	}

	skb->dev = dev;

	if (nd->is_mamac) {
		/* dest */
		memcpy(skb_put(skb, ETH_ALEN), dev->dev_addr, ETH_ALEN);

		/* src */
		memcpy(skb_put(skb, 4), &zero, 4);
		memcpy(skb_put(skb, 2), buf + 5, 2);

		/* eth type */
		memcpy(skb_put(skb, 2), buf + 10, 2);

		buf += MDP_HDR_LEN;
		len -= MDP_HDR_LEN;
	} else {
		buf += MEP_HDR_LEN;
		len -= MEP_HDR_LEN;
	}

	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);

out:
	most_put_mbo(mbo);
	return 0;
}

static int __init most_net_init(void)
{
	pr_info("most_net_init()\n");
	spin_lock_init(&list_lock);
	aim.name = "networking";
	aim.probe_channel = aim_probe_channel;
	aim.disconnect_channel = aim_disconnect_channel;
	aim.tx_completion = aim_resume_tx_channel;
	aim.rx_completion = aim_rx_data;
	return most_register_aim(&aim);
}

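/*
 * most_net_exit - module teardown: unhook every remaining net_dev_context,
 * release its net_device and memory, then deregister the AIM from the
 * MOST core.
 */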
static void __exit most_net_exit(void)
{
	struct net_dev_context *nd, *tmp;

	spin_lock(&list_lock);
	list_for_each_entry_safe(nd, tmp, &net_devices, list) {
		list_del(&nd->list);
		spin_unlock(&list_lock);
		/*
		 * do not call most_stop_channel() here, because channels are
		 * going to be closed in ndo_stop() after unregister_netdev()
		 */
		most_net_rm_netdev_safe(nd);
		kfree(nd);
		spin_lock(&list_lock);
	}
	spin_unlock(&list_lock);

	most_deregister_aim(&aim);
	pr_info("most_net_exit()\n");
}

/**
 * most_deliver_netinfo - callback for the HDM to report the hardware's
 * MAC address and link state
 * @iface: most interface instance
 * @link_stat: link status
 * @mac_addr: MAC address
 */
void most_deliver_netinfo(struct most_interface *iface,
			  unsigned char link_stat, unsigned char *mac_addr)
{
	struct net_dev_context *nd;
	struct net_device *dev;

	pr_info("Received netinfo from %s\n", iface->description);

	nd = get_net_dev_context(iface);
	if (!nd)
		return;

	dev = nd->dev;
	if (!dev)
		return;

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (nd->link_stat != link_stat) {
		nd->link_stat = link_stat;
		if (nd->link_stat)
			netif_wake_queue(dev);
		else
			netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(most_deliver_netinfo);

module_init(most_net_init);
module_exit(most_net_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");