net/ncsi/ncsi-manage.c
1 /*
2 * Copyright Gavin Shan, IBM Corporation 2016.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
16
17 #include <net/ncsi.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/addrconf.h>
21 #include <net/ipv6.h>
22 #include <net/if_inet6.h>
23
24 #include "internal.h"
25 #include "ncsi-pkt.h"
26
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
29
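/* Per-table entry sizes, indexed by filter table ID: a VLAN filter entry
 * holds a 2-byte VLAN ID, while the unicast, multicast and mixed address
 * tables each hold a 6-byte MAC address.
 */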
30 static inline int ncsi_filter_size(int table)
31 {
32 int sizes[] = { 2, 6, 6, 6 };
33
34 BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
35 if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
36 return -EINVAL;
37
38 return sizes[table];
39 }
40
41 static u32 *ncsi_get_filter(struct ncsi_channel *nc, int table, int index)
42 {
43 struct ncsi_channel_filter *ncf;
44 int size;
45
46 ncf = nc->filters[table];
47 if (!ncf)
48 return NULL;
49
50 size = ncsi_filter_size(table);
51 if (size < 0)
52 return NULL;
53
54 return ncf->data + size * index;
55 }
56
57 /* Find the first active filter in a filter table that matches the given
58 * data parameter. If data is NULL, this returns the first active filter.
59 */
60 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
61 {
62 struct ncsi_channel_filter *ncf;
63 void *bitmap;
64 int index, size;
65 unsigned long flags;
66
67 ncf = nc->filters[table];
68 if (!ncf)
69 return -ENXIO;
70
71 size = ncsi_filter_size(table);
72 if (size < 0)
73 return size;
74
75 spin_lock_irqsave(&nc->lock, flags);
76 bitmap = (void *)&ncf->bitmap;
77 index = -1;
78 while ((index = find_next_bit(bitmap, ncf->total, index + 1))
79 < ncf->total) {
80 if (!data || !memcmp(ncf->data + size * index, data, size)) {
81 spin_unlock_irqrestore(&nc->lock, flags);
82 return index;
83 }
84 }
85 spin_unlock_irqrestore(&nc->lock, flags);
86
87 return -ENOENT;
88 }
89
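/* Install @data into the first free slot of the given filter table. If an
 * identical entry already exists, its index is returned instead. Returns
 * the zero-based slot index on success or a negative errno on failure,
 * e.g. -ENOSPC when the table is full.
 */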
90 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
91 {
92 struct ncsi_channel_filter *ncf;
93 int index, size;
94 void *bitmap;
95 unsigned long flags;
96
97 size = ncsi_filter_size(table);
98 if (size < 0)
99 return size;
100
101 index = ncsi_find_filter(nc, table, data);
102 if (index >= 0)
103 return index;
104
105 ncf = nc->filters[table];
106 if (!ncf)
107 return -ENODEV;
108
109 spin_lock_irqsave(&nc->lock, flags);
110 bitmap = (void *)&ncf->bitmap;
111 do {
112 index = find_next_zero_bit(bitmap, ncf->total, 0);
113 if (index >= ncf->total) {
114 spin_unlock_irqrestore(&nc->lock, flags);
115 return -ENOSPC;
116 }
117 } while (test_and_set_bit(index, bitmap));
118
119 memcpy(ncf->data + size * index, data, size);
120 spin_unlock_irqrestore(&nc->lock, flags);
121
122 return index;
123 }
124
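/* Release the filter entry at @index: clear its bit in the table's bitmap
 * and zero the backing data. Returns 0 on success or a negative errno if
 * the table or index is invalid.
 */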
125 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
126 {
127 struct ncsi_channel_filter *ncf;
128 int size;
129 void *bitmap;
130 unsigned long flags;
131
132 size = ncsi_filter_size(table);
133 if (size < 0)
134 return size;
135
136 ncf = nc->filters[table];
137 if (!ncf || index >= ncf->total)
138 return -ENODEV;
139
140 spin_lock_irqsave(&nc->lock, flags);
141 bitmap = (void *)&ncf->bitmap;
142 if (test_and_clear_bit(index, bitmap))
143 memset(ncf->data + size * index, 0, size);
144 spin_unlock_irqrestore(&nc->lock, flags);
145
146 return 0;
147 }
148
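/* Report the aggregate link state to the consumer's handler. Unless
 * @force_down is set, the link is considered up if any active channel
 * reports link up in its NCSI_MODE_LINK data.
 */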
149 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
150 {
151 struct ncsi_dev *nd = &ndp->ndev;
152 struct ncsi_package *np;
153 struct ncsi_channel *nc;
154 unsigned long flags;
155
156 nd->state = ncsi_dev_state_functional;
157 if (force_down) {
158 nd->link_up = 0;
159 goto report;
160 }
161
162 nd->link_up = 0;
163 NCSI_FOR_EACH_PACKAGE(ndp, np) {
164 NCSI_FOR_EACH_CHANNEL(np, nc) {
165 spin_lock_irqsave(&nc->lock, flags);
166
167 if (!list_empty(&nc->link) ||
168 nc->state != NCSI_CHANNEL_ACTIVE) {
169 spin_unlock_irqrestore(&nc->lock, flags);
170 continue;
171 }
172
173 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
174 spin_unlock_irqrestore(&nc->lock, flags);
175 nd->link_up = 1;
176 goto report;
177 }
178
179 spin_unlock_irqrestore(&nc->lock, flags);
180 }
181 }
182
183 report:
184 nd->handler(nd);
185 }
186
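/* Per-channel monitor timer, re-armed roughly once per second. In the
 * START and RETRY states it sends a Get Link Status (GLS) command; the
 * WAIT states merely count elapsed ticks. If the counter runs past
 * NCSI_CHANNEL_MONITOR_WAIT_MAX without being wound back to START (the
 * GLS response handler is expected to do that), the channel is deemed
 * unresponsive: the link is reported down and the channel is requeued
 * for reconfiguration.
 */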
187 static void ncsi_channel_monitor(struct timer_list *t)
188 {
189 struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
190 struct ncsi_package *np = nc->package;
191 struct ncsi_dev_priv *ndp = np->ndp;
192 struct ncsi_channel_mode *ncm;
193 struct ncsi_cmd_arg nca;
194 bool enabled, chained;
195 unsigned int monitor_state;
196 unsigned long flags;
197 int state, ret;
198
199 spin_lock_irqsave(&nc->lock, flags);
200 state = nc->state;
201 chained = !list_empty(&nc->link);
202 enabled = nc->monitor.enabled;
203 monitor_state = nc->monitor.state;
204 spin_unlock_irqrestore(&nc->lock, flags);
205
206 if (!enabled || chained) {
207 ncsi_stop_channel_monitor(nc);
208 return;
209 }
210 if (state != NCSI_CHANNEL_INACTIVE &&
211 state != NCSI_CHANNEL_ACTIVE) {
212 ncsi_stop_channel_monitor(nc);
213 return;
214 }
215
216 switch (monitor_state) {
217 case NCSI_CHANNEL_MONITOR_START:
218 case NCSI_CHANNEL_MONITOR_RETRY:
219 nca.ndp = ndp;
220 nca.package = np->id;
221 nca.channel = nc->id;
222 nca.type = NCSI_PKT_CMD_GLS;
223 nca.req_flags = 0;
224 ret = ncsi_xmit_cmd(&nca);
225 if (ret)
226 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
227 ret);
228 break;
229 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
230 break;
231 default:
232 netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
233 nc->id);
234 if (!(ndp->flags & NCSI_DEV_HWA)) {
235 ncsi_report_link(ndp, true);
236 ndp->flags |= NCSI_DEV_RESHUFFLE;
237 }
238
239 ncsi_stop_channel_monitor(nc);
240
241 ncm = &nc->modes[NCSI_MODE_LINK];
242 spin_lock_irqsave(&nc->lock, flags);
243 nc->state = NCSI_CHANNEL_INVISIBLE;
244 ncm->data[2] &= ~0x1;
245 spin_unlock_irqrestore(&nc->lock, flags);
246
247 spin_lock_irqsave(&ndp->lock, flags);
248 nc->state = NCSI_CHANNEL_ACTIVE;
249 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
250 spin_unlock_irqrestore(&ndp->lock, flags);
251 ncsi_process_next_channel(ndp);
252 return;
253 }
254
255 spin_lock_irqsave(&nc->lock, flags);
256 nc->monitor.state++;
257 spin_unlock_irqrestore(&nc->lock, flags);
258 mod_timer(&nc->monitor.timer, jiffies + HZ);
259 }
260
261 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
262 {
263 unsigned long flags;
264
265 spin_lock_irqsave(&nc->lock, flags);
266 WARN_ON_ONCE(nc->monitor.enabled);
267 nc->monitor.enabled = true;
268 nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
269 spin_unlock_irqrestore(&nc->lock, flags);
270
271 mod_timer(&nc->monitor.timer, jiffies + HZ);
272 }
273
274 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
275 {
276 unsigned long flags;
277
278 spin_lock_irqsave(&nc->lock, flags);
279 if (!nc->monitor.enabled) {
280 spin_unlock_irqrestore(&nc->lock, flags);
281 return;
282 }
283 nc->monitor.enabled = false;
284 spin_unlock_irqrestore(&nc->lock, flags);
285
286 del_timer_sync(&nc->monitor.timer);
287 }
288
289 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
290 unsigned char id)
291 {
292 struct ncsi_channel *nc;
293
294 NCSI_FOR_EACH_CHANNEL(np, nc) {
295 if (nc->id == id)
296 return nc;
297 }
298
299 return NULL;
300 }
301
302 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
303 {
304 struct ncsi_channel *nc, *tmp;
305 int index;
306 unsigned long flags;
307
308 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
309 if (!nc)
310 return NULL;
311
312 nc->id = id;
313 nc->package = np;
314 nc->state = NCSI_CHANNEL_INACTIVE;
315 nc->monitor.enabled = false;
316 timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
317 spin_lock_init(&nc->lock);
318 INIT_LIST_HEAD(&nc->link);
319 for (index = 0; index < NCSI_CAP_MAX; index++)
320 nc->caps[index].index = index;
321 for (index = 0; index < NCSI_MODE_MAX; index++)
322 nc->modes[index].index = index;
323
324 spin_lock_irqsave(&np->lock, flags);
325 tmp = ncsi_find_channel(np, id);
326 if (tmp) {
327 spin_unlock_irqrestore(&np->lock, flags);
328 kfree(nc);
329 return tmp;
330 }
331
332 list_add_tail_rcu(&nc->node, &np->channels);
333 np->channel_num++;
334 spin_unlock_irqrestore(&np->lock, flags);
335
336 return nc;
337 }
338
339 static void ncsi_remove_channel(struct ncsi_channel *nc)
340 {
341 struct ncsi_package *np = nc->package;
342 struct ncsi_channel_filter *ncf;
343 unsigned long flags;
344 int i;
345
346 /* Release filters */
347 spin_lock_irqsave(&nc->lock, flags);
348 for (i = 0; i < NCSI_FILTER_MAX; i++) {
349 ncf = nc->filters[i];
350 if (!ncf)
351 continue;
352
353 nc->filters[i] = NULL;
354 kfree(ncf);
355 }
356
357 nc->state = NCSI_CHANNEL_INACTIVE;
358 spin_unlock_irqrestore(&nc->lock, flags);
359 ncsi_stop_channel_monitor(nc);
360
361 /* Remove and free channel */
362 spin_lock_irqsave(&np->lock, flags);
363 list_del_rcu(&nc->node);
364 np->channel_num--;
365 spin_unlock_irqrestore(&np->lock, flags);
366
367 kfree(nc);
368 }
369
370 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
371 unsigned char id)
372 {
373 struct ncsi_package *np;
374
375 NCSI_FOR_EACH_PACKAGE(ndp, np) {
376 if (np->id == id)
377 return np;
378 }
379
380 return NULL;
381 }
382
383 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
384 unsigned char id)
385 {
386 struct ncsi_package *np, *tmp;
387 unsigned long flags;
388
389 np = kzalloc(sizeof(*np), GFP_ATOMIC);
390 if (!np)
391 return NULL;
392
393 np->id = id;
394 np->ndp = ndp;
395 spin_lock_init(&np->lock);
396 INIT_LIST_HEAD(&np->channels);
397
398 spin_lock_irqsave(&ndp->lock, flags);
399 tmp = ncsi_find_package(ndp, id);
400 if (tmp) {
401 spin_unlock_irqrestore(&ndp->lock, flags);
402 kfree(np);
403 return tmp;
404 }
405
406 list_add_tail_rcu(&np->node, &ndp->packages);
407 ndp->package_num++;
408 spin_unlock_irqrestore(&ndp->lock, flags);
409
410 return np;
411 }
412
413 void ncsi_remove_package(struct ncsi_package *np)
414 {
415 struct ncsi_dev_priv *ndp = np->ndp;
416 struct ncsi_channel *nc, *tmp;
417 unsigned long flags;
418
419 /* Release all child channels */
420 list_for_each_entry_safe(nc, tmp, &np->channels, node)
421 ncsi_remove_channel(nc);
422
423 /* Remove and free package */
424 spin_lock_irqsave(&ndp->lock, flags);
425 list_del_rcu(&np->node);
426 ndp->package_num--;
427 spin_unlock_irqrestore(&ndp->lock, flags);
428
429 kfree(np);
430 }
431
432 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
433 unsigned char id,
434 struct ncsi_package **np,
435 struct ncsi_channel **nc)
436 {
437 struct ncsi_package *p;
438 struct ncsi_channel *c;
439
440 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
441 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
442
443 if (np)
444 *np = p;
445 if (nc)
446 *nc = c;
447 }
448
449 /* Two consecutive NCSI commands should not use the same packet ID;
450  * otherwise a stale response could be matched to the wrong request.
451  * The available IDs are therefore allocated in round-robin fashion.
452  */
453 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
454 unsigned int req_flags)
455 {
456 struct ncsi_request *nr = NULL;
457 int i, limit = ARRAY_SIZE(ndp->requests);
458 unsigned long flags;
459
460 /* Look for an available request slot from the cursor up to the ceiling */
461 spin_lock_irqsave(&ndp->lock, flags);
462 for (i = ndp->request_id; i < limit; i++) {
463 if (ndp->requests[i].used)
464 continue;
465
466 nr = &ndp->requests[i];
467 nr->used = true;
468 nr->flags = req_flags;
469 ndp->request_id = i + 1;
470 goto found;
471 }
472
473 /* Fall back to searching from the starting cursor */
474 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
475 if (ndp->requests[i].used)
476 continue;
477
478 nr = &ndp->requests[i];
479 nr->used = true;
480 nr->flags = req_flags;
481 ndp->request_id = i + 1;
482 goto found;
483 }
484
485 found:
486 spin_unlock_irqrestore(&ndp->lock, flags);
487 return nr;
488 }
489
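/* Return a request slot to the pool, detaching and consuming any command
 * and response skbs. Freeing the last outstanding event-driven command
 * also schedules the state-machine work item so the next step can run.
 */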
490 void ncsi_free_request(struct ncsi_request *nr)
491 {
492 struct ncsi_dev_priv *ndp = nr->ndp;
493 struct sk_buff *cmd, *rsp;
494 unsigned long flags;
495 bool driven;
496
497 if (nr->enabled) {
498 nr->enabled = false;
499 del_timer_sync(&nr->timer);
500 }
501
502 spin_lock_irqsave(&ndp->lock, flags);
503 cmd = nr->cmd;
504 rsp = nr->rsp;
505 nr->cmd = NULL;
506 nr->rsp = NULL;
507 nr->used = false;
508 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
509 spin_unlock_irqrestore(&ndp->lock, flags);
510
511 if (driven && cmd && --ndp->pending_req_num == 0)
512 schedule_work(&ndp->work);
513
514 /* Release command and response */
515 consume_skb(cmd);
516 consume_skb(rsp);
517 }
518
519 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
520 {
521 struct ncsi_dev_priv *ndp;
522
523 NCSI_FOR_EACH_DEV(ndp) {
524 if (ndp->ndev.dev == dev)
525 return &ndp->ndev;
526 }
527
528 return NULL;
529 }
530
531 static void ncsi_request_timeout(struct timer_list *t)
532 {
533 struct ncsi_request *nr = from_timer(nr, t, timer);
534 struct ncsi_dev_priv *ndp = nr->ndp;
535 unsigned long flags;
536
537 /* If the request already has an associated response,
538  * let the response handler release it.
539  */
540 spin_lock_irqsave(&ndp->lock, flags);
541 nr->enabled = false;
542 if (nr->rsp || !nr->cmd) {
543 spin_unlock_irqrestore(&ndp->lock, flags);
544 return;
545 }
546 spin_unlock_irqrestore(&ndp->lock, flags);
547
548 /* Release the request */
549 ncsi_free_request(nr);
550 }
551
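/* Suspend state machine for the active channel. The sequence is:
 * Select Package (SP) -> optionally Get Link Status (GLS) on every channel
 * of the package when a reshuffle is pending -> Disable Channel Network TX
 * (DCNT) -> Disable Channel (DC) -> Deselect Package (DP), after which the
 * channel is marked inactive and the next queued channel is processed.
 */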
552 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
553 {
554 struct ncsi_dev *nd = &ndp->ndev;
555 struct ncsi_package *np = ndp->active_package;
556 struct ncsi_channel *nc = ndp->active_channel;
557 struct ncsi_cmd_arg nca;
558 unsigned long flags;
559 int ret;
560
561 nca.ndp = ndp;
562 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
563 switch (nd->state) {
564 case ncsi_dev_state_suspend:
565 nd->state = ncsi_dev_state_suspend_select;
566 /* Fall through */
567 case ncsi_dev_state_suspend_select:
568 ndp->pending_req_num = 1;
569
570 nca.type = NCSI_PKT_CMD_SP;
571 nca.package = np->id;
572 nca.channel = NCSI_RESERVED_CHANNEL;
573 if (ndp->flags & NCSI_DEV_HWA)
574 nca.bytes[0] = 0;
575 else
576 nca.bytes[0] = 1;
577
578 /* Retrieve the latest link states of the channels in the current
579  * package when the active channel needs to fail over to another
580  * one. In that case we may pick a different channel as the next
581  * active one, and the channels' link states are the most important
582  * factor in that selection, so they must be accurate. Unfortunately,
583  * the link states of inactive channels cannot be kept up to date
584  * via LSC AENs in time.
585  */
586 if (ndp->flags & NCSI_DEV_RESHUFFLE)
587 nd->state = ncsi_dev_state_suspend_gls;
588 else
589 nd->state = ncsi_dev_state_suspend_dcnt;
590 ret = ncsi_xmit_cmd(&nca);
591 if (ret)
592 goto error;
593
594 break;
595 case ncsi_dev_state_suspend_gls:
596 ndp->pending_req_num = np->channel_num;
597
598 nca.type = NCSI_PKT_CMD_GLS;
599 nca.package = np->id;
600
601 nd->state = ncsi_dev_state_suspend_dcnt;
602 NCSI_FOR_EACH_CHANNEL(np, nc) {
603 nca.channel = nc->id;
604 ret = ncsi_xmit_cmd(&nca);
605 if (ret)
606 goto error;
607 }
608
609 break;
610 case ncsi_dev_state_suspend_dcnt:
611 ndp->pending_req_num = 1;
612
613 nca.type = NCSI_PKT_CMD_DCNT;
614 nca.package = np->id;
615 nca.channel = nc->id;
616
617 nd->state = ncsi_dev_state_suspend_dc;
618 ret = ncsi_xmit_cmd(&nca);
619 if (ret)
620 goto error;
621
622 break;
623 case ncsi_dev_state_suspend_dc:
624 ndp->pending_req_num = 1;
625
626 nca.type = NCSI_PKT_CMD_DC;
627 nca.package = np->id;
628 nca.channel = nc->id;
629 nca.bytes[0] = 1;
630
631 nd->state = ncsi_dev_state_suspend_deselect;
632 ret = ncsi_xmit_cmd(&nca);
633 if (ret)
634 goto error;
635
636 break;
637 case ncsi_dev_state_suspend_deselect:
638 ndp->pending_req_num = 1;
639
640 nca.type = NCSI_PKT_CMD_DP;
641 nca.package = np->id;
642 nca.channel = NCSI_RESERVED_CHANNEL;
643
644 nd->state = ncsi_dev_state_suspend_done;
645 ret = ncsi_xmit_cmd(&nca);
646 if (ret)
647 goto error;
648
649 break;
650 case ncsi_dev_state_suspend_done:
651 spin_lock_irqsave(&nc->lock, flags);
652 nc->state = NCSI_CHANNEL_INACTIVE;
653 spin_unlock_irqrestore(&nc->lock, flags);
654 ncsi_process_next_channel(ndp);
655
656 break;
657 default:
658 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
659 nd->state);
660 }
661
662 return;
663 error:
664 nd->state = ncsi_dev_state_functional;
665 }
666
667 /* Check the VLAN filter bitmap for a set filter, and construct a
668 * "Set VLAN Filter - Disable" packet if found.
669 */
670 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
671 struct ncsi_cmd_arg *nca)
672 {
673 int index;
674 u32 *data;
675 u16 vid;
676
677 index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, NULL);
678 if (index < 0) {
679 /* Filter table empty */
680 return -1;
681 }
682
683 data = ncsi_get_filter(nc, NCSI_FILTER_VLAN, index);
684 if (!data) {
685 netdev_err(ndp->ndev.dev,
686 "NCSI: failed to retrieve filter %d\n", index);
687 /* Set the VLAN id to 0 - this will still disable the entry in
688 * the filter table, but we won't know what it was.
689 */
690 vid = 0;
691 } else {
692 vid = *(u16 *)data;
693 }
694
695 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
696 "NCSI: removed vlan tag %u at index %d\n",
697 vid, index + 1);
698 ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);
699
700 nca->type = NCSI_PKT_CMD_SVF;
701 nca->words[1] = vid;
702 /* HW filter index starts at 1 */
703 nca->bytes[6] = index + 1;
704 nca->bytes[7] = 0x00;
705 return 0;
706 }
707
708 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
709  * packet.
710  */
711 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
712 struct ncsi_cmd_arg *nca)
713 {
714 struct vlan_vid *vlan = NULL;
715 int index = 0;
716
717 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
718 index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
719 if (index < 0) {
720 /* New tag to add */
721 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
722 "NCSI: new vlan id to set: %u\n",
723 vlan->vid);
724 break;
725 }
726 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
727 "vid %u already at filter pos %d\n",
728 vlan->vid, index);
729 }
730
731 if (!vlan || index >= 0) {
732 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
733 "no vlan ids left to set\n");
734 return -1;
735 }
736
737 index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
738 if (index < 0) {
739 netdev_err(ndp->ndev.dev,
740 "Failed to add new VLAN tag, error %d\n", index);
741 if (index == -ENOSPC)
742 netdev_err(ndp->ndev.dev,
743 "Channel %u already has all VLAN filters set\n",
744 nc->id);
745 return -1;
746 }
747
748 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
749 "NCSI: set vid %u in packet, index %u\n",
750 vlan->vid, index + 1);
751 nca->type = NCSI_PKT_CMD_SVF;
752 nca->words[1] = vlan->vid;
753 /* HW filter index starts at 1 */
754 nca->bytes[6] = index + 1;
755 nca->bytes[7] = 0x01;
756
757 return 0;
758 }
759
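/* Configuration state machine for the active channel. The command sequence
 * is roughly: SP -> CIS -> clear stale VLAN filters (SVF disable) ->
 * program the known VLAN IDs (SVF enable) -> EV or DV -> SMA -> EBF ->
 * (EGMF when IPv6 multicast is needed) -> ECNT -> EC -> AE (if AENs are
 * supported) -> GLS -> done, at which point the channel becomes ACTIVE if
 * its link is up and the channel monitor is started.
 */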
760 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
761 {
762 struct ncsi_dev *nd = &ndp->ndev;
763 struct net_device *dev = nd->dev;
764 struct ncsi_package *np = ndp->active_package;
765 struct ncsi_channel *nc = ndp->active_channel;
766 struct ncsi_channel *hot_nc = NULL;
767 struct ncsi_cmd_arg nca;
768 unsigned char index;
769 unsigned long flags;
770 int ret;
771
772 nca.ndp = ndp;
773 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
774 switch (nd->state) {
775 case ncsi_dev_state_config:
776 case ncsi_dev_state_config_sp:
777 ndp->pending_req_num = 1;
778
779 /* Select the specific package */
780 nca.type = NCSI_PKT_CMD_SP;
781 if (ndp->flags & NCSI_DEV_HWA)
782 nca.bytes[0] = 0;
783 else
784 nca.bytes[0] = 1;
785 nca.package = np->id;
786 nca.channel = NCSI_RESERVED_CHANNEL;
787 ret = ncsi_xmit_cmd(&nca);
788 if (ret) {
789 netdev_err(ndp->ndev.dev,
790 "NCSI: Failed to transmit CMD_SP\n");
791 goto error;
792 }
793
794 nd->state = ncsi_dev_state_config_cis;
795 break;
796 case ncsi_dev_state_config_cis:
797 ndp->pending_req_num = 1;
798
799 /* Clear initial state */
800 nca.type = NCSI_PKT_CMD_CIS;
801 nca.package = np->id;
802 nca.channel = nc->id;
803 ret = ncsi_xmit_cmd(&nca);
804 if (ret) {
805 netdev_err(ndp->ndev.dev,
806 "NCSI: Failed to transmit CMD_CIS\n");
807 goto error;
808 }
809
810 nd->state = ncsi_dev_state_config_clear_vids;
811 break;
812 case ncsi_dev_state_config_clear_vids:
813 case ncsi_dev_state_config_svf:
814 case ncsi_dev_state_config_ev:
815 case ncsi_dev_state_config_sma:
816 case ncsi_dev_state_config_ebf:
817 #if IS_ENABLED(CONFIG_IPV6)
818 case ncsi_dev_state_config_egmf:
819 #endif
820 case ncsi_dev_state_config_ecnt:
821 case ncsi_dev_state_config_ec:
822 case ncsi_dev_state_config_ae:
823 case ncsi_dev_state_config_gls:
824 ndp->pending_req_num = 1;
825
826 nca.package = np->id;
827 nca.channel = nc->id;
828
829 /* Clear any active filters on the channel before setting */
830 if (nd->state == ncsi_dev_state_config_clear_vids) {
831 ret = clear_one_vid(ndp, nc, &nca);
832 if (ret) {
833 nd->state = ncsi_dev_state_config_svf;
834 schedule_work(&ndp->work);
835 break;
836 }
837 /* Repeat */
838 nd->state = ncsi_dev_state_config_clear_vids;
839 /* Add known VLAN tags to the filter */
840 } else if (nd->state == ncsi_dev_state_config_svf) {
841 ret = set_one_vid(ndp, nc, &nca);
842 if (ret) {
843 nd->state = ncsi_dev_state_config_ev;
844 schedule_work(&ndp->work);
845 break;
846 }
847 /* Repeat */
848 nd->state = ncsi_dev_state_config_svf;
849 /* Enable/Disable the VLAN filter */
850 } else if (nd->state == ncsi_dev_state_config_ev) {
851 if (list_empty(&ndp->vlan_vids)) {
852 nca.type = NCSI_PKT_CMD_DV;
853 } else {
854 nca.type = NCSI_PKT_CMD_EV;
855 nca.bytes[3] = NCSI_CAP_VLAN_NO;
856 }
857 nd->state = ncsi_dev_state_config_sma;
858 } else if (nd->state == ncsi_dev_state_config_sma) {
859 /* Use the first entry of the unicast filter table.
860  * Note that the MAC filter table starts at entry 1,
861  * not 0.
862  */
863 nca.type = NCSI_PKT_CMD_SMA;
864 for (index = 0; index < 6; index++)
865 nca.bytes[index] = dev->dev_addr[index];
866 nca.bytes[6] = 0x1;
867 nca.bytes[7] = 0x1;
868 nd->state = ncsi_dev_state_config_ebf;
869 } else if (nd->state == ncsi_dev_state_config_ebf) {
870 nca.type = NCSI_PKT_CMD_EBF;
871 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
872 nd->state = ncsi_dev_state_config_ecnt;
873 #if IS_ENABLED(CONFIG_IPV6)
874 if (ndp->inet6_addr_num > 0 &&
875 (nc->caps[NCSI_CAP_GENERIC].cap &
876 NCSI_CAP_GENERIC_MC))
877 nd->state = ncsi_dev_state_config_egmf;
878 else
879 nd->state = ncsi_dev_state_config_ecnt;
880 } else if (nd->state == ncsi_dev_state_config_egmf) {
881 nca.type = NCSI_PKT_CMD_EGMF;
882 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
883 nd->state = ncsi_dev_state_config_ecnt;
884 #endif /* CONFIG_IPV6 */
885 } else if (nd->state == ncsi_dev_state_config_ecnt) {
886 nca.type = NCSI_PKT_CMD_ECNT;
887 nd->state = ncsi_dev_state_config_ec;
888 } else if (nd->state == ncsi_dev_state_config_ec) {
889 /* Enable AEN if it's supported */
890 nca.type = NCSI_PKT_CMD_EC;
891 nd->state = ncsi_dev_state_config_ae;
892 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
893 nd->state = ncsi_dev_state_config_gls;
894 } else if (nd->state == ncsi_dev_state_config_ae) {
895 nca.type = NCSI_PKT_CMD_AE;
896 nca.bytes[0] = 0;
897 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
898 nd->state = ncsi_dev_state_config_gls;
899 } else if (nd->state == ncsi_dev_state_config_gls) {
900 nca.type = NCSI_PKT_CMD_GLS;
901 nd->state = ncsi_dev_state_config_done;
902 }
903
904 ret = ncsi_xmit_cmd(&nca);
905 if (ret) {
906 netdev_err(ndp->ndev.dev,
907 "NCSI: Failed to transmit CMD %x\n",
908 nca.type);
909 goto error;
910 }
911 break;
912 case ncsi_dev_state_config_done:
913 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
914 "NCSI: channel %u config done\n", nc->id);
915 spin_lock_irqsave(&nc->lock, flags);
916 if (nc->reconfigure_needed) {
917 /* This channel's configuration has been updated
918 * part-way during the config state - start the
919 * channel configuration over
920 */
921 nc->reconfigure_needed = false;
922 nc->state = NCSI_CHANNEL_INACTIVE;
923 spin_unlock_irqrestore(&nc->lock, flags);
924
925 spin_lock_irqsave(&ndp->lock, flags);
926 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
927 spin_unlock_irqrestore(&ndp->lock, flags);
928
929 netdev_printk(KERN_DEBUG, dev,
930 "Dirty NCSI channel state reset\n");
931 ncsi_process_next_channel(ndp);
932 break;
933 }
934
935 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
936 hot_nc = nc;
937 nc->state = NCSI_CHANNEL_ACTIVE;
938 } else {
939 hot_nc = NULL;
940 nc->state = NCSI_CHANNEL_INACTIVE;
941 netdev_warn(ndp->ndev.dev,
942 "NCSI: channel %u link down after config\n",
943 nc->id);
944 }
945 spin_unlock_irqrestore(&nc->lock, flags);
946
947 /* Update the hot channel */
948 spin_lock_irqsave(&ndp->lock, flags);
949 ndp->hot_channel = hot_nc;
950 spin_unlock_irqrestore(&ndp->lock, flags);
951
952 ncsi_start_channel_monitor(nc);
953 ncsi_process_next_channel(ndp);
954 break;
955 default:
956 netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
957 nd->state);
958 }
959
960 return;
961
962 error:
963 ncsi_report_link(ndp, true);
964 }
965
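/* Pick the next channel to bring up: prefer an inactive channel whose link
 * is already up; otherwise fall back to the previous "hot" channel or the
 * first inactive channel found. The chosen channel is queued and the state
 * machine is kicked via ncsi_process_next_channel().
 */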
966 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
967 {
968 struct ncsi_package *np;
969 struct ncsi_channel *nc, *found, *hot_nc;
970 struct ncsi_channel_mode *ncm;
971 unsigned long flags;
972
973 spin_lock_irqsave(&ndp->lock, flags);
974 hot_nc = ndp->hot_channel;
975 spin_unlock_irqrestore(&ndp->lock, flags);
976
977 /* The search stops as soon as an inactive channel with
978  * link up is found.
979  */
980 found = NULL;
981 NCSI_FOR_EACH_PACKAGE(ndp, np) {
982 NCSI_FOR_EACH_CHANNEL(np, nc) {
983 spin_lock_irqsave(&nc->lock, flags);
984
985 if (!list_empty(&nc->link) ||
986 nc->state != NCSI_CHANNEL_INACTIVE) {
987 spin_unlock_irqrestore(&nc->lock, flags);
988 continue;
989 }
990
991 if (!found)
992 found = nc;
993
994 if (nc == hot_nc)
995 found = nc;
996
997 ncm = &nc->modes[NCSI_MODE_LINK];
998 if (ncm->data[2] & 0x1) {
999 spin_unlock_irqrestore(&nc->lock, flags);
1000 found = nc;
1001 goto out;
1002 }
1003
1004 spin_unlock_irqrestore(&nc->lock, flags);
1005 }
1006 }
1007
1008 if (!found) {
1009 netdev_warn(ndp->ndev.dev,
1010 "NCSI: No channel found with link\n");
1011 ncsi_report_link(ndp, true);
1012 return -ENODEV;
1013 }
1014
1015 ncm = &found->modes[NCSI_MODE_LINK];
1016 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
1017 "NCSI: Channel %u added to queue (link %s)\n",
1018 found->id, ncm->data[2] & 0x1 ? "up" : "down");
1019
1020 out:
1021 spin_lock_irqsave(&ndp->lock, flags);
1022 list_add_tail_rcu(&found->link, &ndp->channel_queue);
1023 spin_unlock_irqrestore(&ndp->lock, flags);
1024
1025 return ncsi_process_next_channel(ndp);
1026 }
1027
1028 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1029 {
1030 struct ncsi_package *np;
1031 struct ncsi_channel *nc;
1032 unsigned int cap;
1033 bool has_channel = false;
1034
1035 /* Hardware arbitration is disabled if any single channel
1036  * doesn't explicitly advertise support for it.
1037  */
1038 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1039 NCSI_FOR_EACH_CHANNEL(np, nc) {
1040 has_channel = true;
1041
1042 cap = nc->caps[NCSI_CAP_GENERIC].cap;
1043 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1044 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1045 NCSI_CAP_GENERIC_HWA_SUPPORT) {
1046 ndp->flags &= ~NCSI_DEV_HWA;
1047 return false;
1048 }
1049 }
1050 }
1051
1052 if (has_channel) {
1053 ndp->flags |= NCSI_DEV_HWA;
1054 return true;
1055 }
1056
1057 ndp->flags &= ~NCSI_DEV_HWA;
1058 return false;
1059 }
1060
1061 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
1062 {
1063 struct ncsi_package *np;
1064 struct ncsi_channel *nc;
1065 unsigned long flags;
1066
1067 /* Move all available channels to processing queue */
1068 spin_lock_irqsave(&ndp->lock, flags);
1069 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1070 NCSI_FOR_EACH_CHANNEL(np, nc) {
1071 WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
1072 !list_empty(&nc->link));
1073 ncsi_stop_channel_monitor(nc);
1074 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1075 }
1076 }
1077 spin_unlock_irqrestore(&ndp->lock, flags);
1078
1079 /* In the extreme case we may have no channels at all */
1080 if (list_empty(&ndp->channel_queue)) {
1081 netdev_err(ndp->ndev.dev,
1082 "NCSI: No available channels for HWA\n");
1083 ncsi_report_link(ndp, false);
1084 return -ENOENT;
1085 }
1086
1087 return ncsi_process_next_channel(ndp);
1088 }
1089
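/* Probe state machine: deselect all eight possible packages, then select
 * and deselect each one to discover which packages respond. For every
 * discovered package, select it, clear the initial state of its channels
 * (CIS), query version, capabilities and link status (GVI/GC/GLS), then
 * deselect it and move on. Once enumeration finishes, hardware arbitration
 * is evaluated and an active channel is chosen.
 */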
1090 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1091 {
1092 struct ncsi_dev *nd = &ndp->ndev;
1093 struct ncsi_package *np;
1094 struct ncsi_channel *nc;
1095 struct ncsi_cmd_arg nca;
1096 unsigned char index;
1097 int ret;
1098
1099 nca.ndp = ndp;
1100 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1101 switch (nd->state) {
1102 case ncsi_dev_state_probe:
1103 nd->state = ncsi_dev_state_probe_deselect;
1104 /* Fall through */
1105 case ncsi_dev_state_probe_deselect:
1106 ndp->pending_req_num = 8;
1107
1108 /* Deselect all possible packages */
1109 nca.type = NCSI_PKT_CMD_DP;
1110 nca.channel = NCSI_RESERVED_CHANNEL;
1111 for (index = 0; index < 8; index++) {
1112 nca.package = index;
1113 ret = ncsi_xmit_cmd(&nca);
1114 if (ret)
1115 goto error;
1116 }
1117
1118 nd->state = ncsi_dev_state_probe_package;
1119 break;
1120 case ncsi_dev_state_probe_package:
1121 ndp->pending_req_num = 16;
1122
1123 /* Select all possible packages */
1124 nca.type = NCSI_PKT_CMD_SP;
1125 nca.bytes[0] = 1;
1126 nca.channel = NCSI_RESERVED_CHANNEL;
1127 for (index = 0; index < 8; index++) {
1128 nca.package = index;
1129 ret = ncsi_xmit_cmd(&nca);
1130 if (ret)
1131 goto error;
1132 }
1133
1134 /* Deselect all possible packages */
1135 nca.type = NCSI_PKT_CMD_DP;
1136 for (index = 0; index < 8; index++) {
1137 nca.package = index;
1138 ret = ncsi_xmit_cmd(&nca);
1139 if (ret)
1140 goto error;
1141 }
1142
1143 nd->state = ncsi_dev_state_probe_channel;
1144 break;
1145 case ncsi_dev_state_probe_channel:
1146 if (!ndp->active_package)
1147 ndp->active_package = list_first_or_null_rcu(
1148 &ndp->packages, struct ncsi_package, node);
1149 else if (list_is_last(&ndp->active_package->node,
1150 &ndp->packages))
1151 ndp->active_package = NULL;
1152 else
1153 ndp->active_package = list_next_entry(
1154 ndp->active_package, node);
1155
1156 /* All available packages and channels have been enumerated.
1157  * The enumeration happens only once, when the NCSI interface
1158  * is started, so we need to continue bringing the interface
1159  * up after the enumeration completes.
1160  *
1161  * We have to choose an active channel before configuring it.
1162  * Note that in extreme situations we may end up with no
1163  * active channel at all.
1164  */
1165 if (!ndp->active_package) {
1166 ndp->flags |= NCSI_DEV_PROBED;
1167 if (ncsi_check_hwa(ndp))
1168 ncsi_enable_hwa(ndp);
1169 else
1170 ncsi_choose_active_channel(ndp);
1171 return;
1172 }
1173
1174 /* Select the active package */
1175 ndp->pending_req_num = 1;
1176 nca.type = NCSI_PKT_CMD_SP;
1177 nca.bytes[0] = 1;
1178 nca.package = ndp->active_package->id;
1179 nca.channel = NCSI_RESERVED_CHANNEL;
1180 ret = ncsi_xmit_cmd(&nca);
1181 if (ret)
1182 goto error;
1183
1184 nd->state = ncsi_dev_state_probe_cis;
1185 break;
1186 case ncsi_dev_state_probe_cis:
1187 ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1188
1189 /* Clear initial state */
1190 nca.type = NCSI_PKT_CMD_CIS;
1191 nca.package = ndp->active_package->id;
1192 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1193 nca.channel = index;
1194 ret = ncsi_xmit_cmd(&nca);
1195 if (ret)
1196 goto error;
1197 }
1198
1199 nd->state = ncsi_dev_state_probe_gvi;
1200 break;
1201 case ncsi_dev_state_probe_gvi:
1202 case ncsi_dev_state_probe_gc:
1203 case ncsi_dev_state_probe_gls:
1204 np = ndp->active_package;
1205 ndp->pending_req_num = np->channel_num;
1206
1207 /* Retrieve version, capabilities or link status */
1208 if (nd->state == ncsi_dev_state_probe_gvi)
1209 nca.type = NCSI_PKT_CMD_GVI;
1210 else if (nd->state == ncsi_dev_state_probe_gc)
1211 nca.type = NCSI_PKT_CMD_GC;
1212 else
1213 nca.type = NCSI_PKT_CMD_GLS;
1214
1215 nca.package = np->id;
1216 NCSI_FOR_EACH_CHANNEL(np, nc) {
1217 nca.channel = nc->id;
1218 ret = ncsi_xmit_cmd(&nca);
1219 if (ret)
1220 goto error;
1221 }
1222
1223 if (nd->state == ncsi_dev_state_probe_gvi)
1224 nd->state = ncsi_dev_state_probe_gc;
1225 else if (nd->state == ncsi_dev_state_probe_gc)
1226 nd->state = ncsi_dev_state_probe_gls;
1227 else
1228 nd->state = ncsi_dev_state_probe_dp;
1229 break;
1230 case ncsi_dev_state_probe_dp:
1231 ndp->pending_req_num = 1;
1232
1233 /* Deselect the active package */
1234 nca.type = NCSI_PKT_CMD_DP;
1235 nca.package = ndp->active_package->id;
1236 nca.channel = NCSI_RESERVED_CHANNEL;
1237 ret = ncsi_xmit_cmd(&nca);
1238 if (ret)
1239 goto error;
1240
1241 /* Scan channels in next package */
1242 nd->state = ncsi_dev_state_probe_channel;
1243 break;
1244 default:
1245 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1246 nd->state);
1247 }
1248
1249 return;
1250 error:
1251 netdev_err(ndp->ndev.dev,
1252 "NCSI: Failed to transmit cmd 0x%x during probe\n",
1253 nca.type);
1254 ncsi_report_link(ndp, true);
1255 }
1256
1257 static void ncsi_dev_work(struct work_struct *work)
1258 {
1259 struct ncsi_dev_priv *ndp = container_of(work,
1260 struct ncsi_dev_priv, work);
1261 struct ncsi_dev *nd = &ndp->ndev;
1262
1263 switch (nd->state & ncsi_dev_state_major) {
1264 case ncsi_dev_state_probe:
1265 ncsi_probe_channel(ndp);
1266 break;
1267 case ncsi_dev_state_suspend:
1268 ncsi_suspend_channel(ndp);
1269 break;
1270 case ncsi_dev_state_config:
1271 ncsi_configure_channel(ndp);
1272 break;
1273 default:
1274 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1275 nd->state);
1276 }
1277 }
1278
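/* Take the next channel off the processing queue and either configure it
 * (previous state INACTIVE) or suspend it (previous state ACTIVE). When
 * the queue is empty, a pending reshuffle triggers channel re-selection;
 * otherwise the current link state is reported to the consumer.
 */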
1279 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1280 {
1281 struct ncsi_channel *nc;
1282 int old_state;
1283 unsigned long flags;
1284
1285 spin_lock_irqsave(&ndp->lock, flags);
1286 nc = list_first_or_null_rcu(&ndp->channel_queue,
1287 struct ncsi_channel, link);
1288 if (!nc) {
1289 spin_unlock_irqrestore(&ndp->lock, flags);
1290 goto out;
1291 }
1292
1293 list_del_init(&nc->link);
1294 spin_unlock_irqrestore(&ndp->lock, flags);
1295
1296 spin_lock_irqsave(&nc->lock, flags);
1297 old_state = nc->state;
1298 nc->state = NCSI_CHANNEL_INVISIBLE;
1299 spin_unlock_irqrestore(&nc->lock, flags);
1300
1301 ndp->active_channel = nc;
1302 ndp->active_package = nc->package;
1303
1304 switch (old_state) {
1305 case NCSI_CHANNEL_INACTIVE:
1306 ndp->ndev.state = ncsi_dev_state_config;
1307 netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1308 nc->id);
1309 ncsi_configure_channel(ndp);
1310 break;
1311 case NCSI_CHANNEL_ACTIVE:
1312 ndp->ndev.state = ncsi_dev_state_suspend;
1313 netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1314 nc->id);
1315 ncsi_suspend_channel(ndp);
1316 break;
1317 default:
1318 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1319 old_state, nc->package->id, nc->id);
1320 ncsi_report_link(ndp, false);
1321 return -EINVAL;
1322 }
1323
1324 return 0;
1325
1326 out:
1327 ndp->active_channel = NULL;
1328 ndp->active_package = NULL;
1329 if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1330 ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1331 return ncsi_choose_active_channel(ndp);
1332 }
1333
1334 netdev_printk(KERN_DEBUG, ndp->ndev.dev,
1335 "NCSI: No more channels to process\n");
1336 ncsi_report_link(ndp, false);
1337 return -ENODEV;
1338 }
1339
1340 #if IS_ENABLED(CONFIG_IPV6)
1341 static int ncsi_inet6addr_event(struct notifier_block *this,
1342 unsigned long event, void *data)
1343 {
1344 struct inet6_ifaddr *ifa = data;
1345 struct net_device *dev = ifa->idev->dev;
1346 struct ncsi_dev *nd = ncsi_find_dev(dev);
1347 struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1348 struct ncsi_package *np;
1349 struct ncsi_channel *nc;
1350 struct ncsi_cmd_arg nca;
1351 bool action;
1352 int ret;
1353
1354 if (!ndp || (ipv6_addr_type(&ifa->addr) &
1355 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1356 return NOTIFY_OK;
1357
1358 switch (event) {
1359 case NETDEV_UP:
1360 action = (++ndp->inet6_addr_num) == 1;
1361 nca.type = NCSI_PKT_CMD_EGMF;
1362 break;
1363 case NETDEV_DOWN:
1364 action = (--ndp->inet6_addr_num == 0);
1365 nca.type = NCSI_PKT_CMD_DGMF;
1366 break;
1367 default:
1368 return NOTIFY_OK;
1369 }
1370
1371 /* We might not have an active channel or package yet. The
1372  * multicast filter required for IPv6 will be enabled once an
1373  * active channel or package has been chosen.
1374  */
1375 np = ndp->active_package;
1376 nc = ndp->active_channel;
1377 if (!action || !np || !nc)
1378 return NOTIFY_OK;
1379
1380 /* There is nothing to enable or disable if the function isn't supported */
1381 if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1382 return NOTIFY_OK;
1383
1384 nca.ndp = ndp;
1385 nca.req_flags = 0;
1386 nca.package = np->id;
1387 nca.channel = nc->id;
1388 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1389 ret = ncsi_xmit_cmd(&nca);
1390 if (ret) {
1391 netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1392 (event == NETDEV_UP) ? "enable" : "disable", ret);
1393 return NOTIFY_DONE;
1394 }
1395
1396 return NOTIFY_OK;
1397 }
1398
1399 static struct notifier_block ncsi_inet6addr_notifier = {
1400 .notifier_call = ncsi_inet6addr_event,
1401 };
1402 #endif /* CONFIG_IPV6 */
1403
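/* Push every configured (ACTIVE) channel back through the configuration
 * state machine, e.g. after the VLAN filter list has changed. Channels
 * that are currently busy are only marked reconfigure_needed so they are
 * redone once their current transition completes. Returns the number of
 * channels that were queued.
 */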
1404 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1405 {
1406 struct ncsi_dev *nd = &ndp->ndev;
1407 struct ncsi_channel *nc;
1408 struct ncsi_package *np;
1409 unsigned long flags;
1410 unsigned int n = 0;
1411
1412 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1413 NCSI_FOR_EACH_CHANNEL(np, nc) {
1414 spin_lock_irqsave(&nc->lock, flags);
1415
1416 /* Channels may be busy; mark them dirty instead of
1417  * kicking them if:
1418  * a) the channel is not ACTIVE (configured)
1419  * b) it is in the channel_queue (to be configured)
1420  * c) its ndev is in the config state
1421  */
1422 if (nc->state != NCSI_CHANNEL_ACTIVE) {
1423 if ((ndp->ndev.state & 0xff00) ==
1424 ncsi_dev_state_config ||
1425 !list_empty(&nc->link)) {
1426 netdev_printk(KERN_DEBUG, nd->dev,
1427 "NCSI: channel %p marked dirty\n",
1428 nc);
1429 nc->reconfigure_needed = true;
1430 }
1431 spin_unlock_irqrestore(&nc->lock, flags);
1432 continue;
1433 }
1434
1435 spin_unlock_irqrestore(&nc->lock, flags);
1436
1437 ncsi_stop_channel_monitor(nc);
1438 spin_lock_irqsave(&nc->lock, flags);
1439 nc->state = NCSI_CHANNEL_INACTIVE;
1440 spin_unlock_irqrestore(&nc->lock, flags);
1441
1442 spin_lock_irqsave(&ndp->lock, flags);
1443 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1444 spin_unlock_irqrestore(&ndp->lock, flags);
1445
1446 netdev_printk(KERN_DEBUG, nd->dev,
1447 "NCSI: kicked channel %p\n", nc);
1448 n++;
1449 }
1450 }
1451
1452 return n;
1453 }
1454
1455 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1456 {
1457 struct ncsi_dev_priv *ndp;
1458 unsigned int n_vids = 0;
1459 struct vlan_vid *vlan;
1460 struct ncsi_dev *nd;
1461 bool found = false;
1462
1463 if (vid == 0)
1464 return 0;
1465
1466 nd = ncsi_find_dev(dev);
1467 if (!nd) {
1468 netdev_warn(dev, "NCSI: No net_device?\n");
1469 return 0;
1470 }
1471
1472 ndp = TO_NCSI_DEV_PRIV(nd);
1473
1474 /* Add the VLAN id to our internal list */
1475 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1476 n_vids++;
1477 if (vlan->vid == vid) {
1478 netdev_printk(KERN_DEBUG, dev,
1479 "NCSI: vid %u already registered\n", vid);
1480 return 0;
1481 }
1482 }
1483 if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1484 netdev_warn(dev,
1485 "tried to add vlan id %u but NCSI max already registered (%u)\n",
1486 vid, NCSI_MAX_VLAN_VIDS);
1487 return -ENOSPC;
1488 }
1489
1490 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1491 if (!vlan)
1492 return -ENOMEM;
1493
1494 vlan->proto = proto;
1495 vlan->vid = vid;
1496 list_add_rcu(&vlan->list, &ndp->vlan_vids);
1497
1498 netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
1499
1500 found = ncsi_kick_channels(ndp) != 0;
1501
1502 return found ? ncsi_process_next_channel(ndp) : 0;
1503 }
1504 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1505
1506 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1507 {
1508 struct vlan_vid *vlan, *tmp;
1509 struct ncsi_dev_priv *ndp;
1510 struct ncsi_dev *nd;
1511 bool found = false;
1512
1513 if (vid == 0)
1514 return 0;
1515
1516 nd = ncsi_find_dev(dev);
1517 if (!nd) {
1518 netdev_warn(dev, "NCSI: no net_device?\n");
1519 return 0;
1520 }
1521
1522 ndp = TO_NCSI_DEV_PRIV(nd);
1523
1524 /* Remove the VLAN id from our internal list */
1525 list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1526 if (vlan->vid == vid) {
1527 netdev_printk(KERN_DEBUG, dev,
1528 "NCSI: vid %u found, removing\n", vid);
1529 list_del_rcu(&vlan->list);
1530 found = true;
1531 kfree(vlan);
1532 }
1533
1534 if (!found) {
1535 netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1536 return -EINVAL;
1537 }
1538
1539 found = ncsi_kick_channels(ndp) != 0;
1540
1541 return found ? ncsi_process_next_channel(ndp) : 0;
1542 }
1543 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1544
1545 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1546 void (*handler)(struct ncsi_dev *ndev))
1547 {
1548 struct ncsi_dev_priv *ndp;
1549 struct ncsi_dev *nd;
1550 unsigned long flags;
1551 int i;
1552
1553 /* Check whether the device has already been registered */
1554 nd = ncsi_find_dev(dev);
1555 if (nd)
1556 return nd;
1557
1558 /* Create NCSI device */
1559 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1560 if (!ndp)
1561 return NULL;
1562
1563 nd = &ndp->ndev;
1564 nd->state = ncsi_dev_state_registered;
1565 nd->dev = dev;
1566 nd->handler = handler;
1567 ndp->pending_req_num = 0;
1568 INIT_LIST_HEAD(&ndp->channel_queue);
1569 INIT_LIST_HEAD(&ndp->vlan_vids);
1570 INIT_WORK(&ndp->work, ncsi_dev_work);
1571
1572 /* Initialize private NCSI device */
1573 spin_lock_init(&ndp->lock);
1574 INIT_LIST_HEAD(&ndp->packages);
1575 ndp->request_id = NCSI_REQ_START_IDX;
1576 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1577 ndp->requests[i].id = i;
1578 ndp->requests[i].ndp = ndp;
1579 timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1580 }
1581
1582 spin_lock_irqsave(&ncsi_dev_lock, flags);
1583 #if IS_ENABLED(CONFIG_IPV6)
1584 ndp->inet6_addr_num = 0;
1585 if (list_empty(&ncsi_dev_list))
1586 register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1587 #endif
1588 list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1589 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1590
1591 /* Register NCSI packet Rx handler */
1592 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1593 ndp->ptype.func = ncsi_rcv_rsp;
1594 ndp->ptype.dev = dev;
1595 dev_add_pack(&ndp->ptype);
1596
1597 return nd;
1598 }
1599 EXPORT_SYMBOL_GPL(ncsi_register_dev);
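/* Typical consumer usage (an illustrative sketch, not part of this file):
 * an NC-SI capable Ethernet driver registers an NCSI device at probe time
 * with a handler that is called on link/state changes, then starts and
 * stops it from its ndo_open/ndo_stop callbacks. The "foo"/"priv" names
 * below are hypothetical.
 *
 *	static void foo_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state != ncsi_dev_state_functional)
 *			return;
 *		netdev_info(nd->dev, "NCSI link %s\n",
 *			    nd->link_up ? "up" : "down");
 *	}
 *
 *	priv->ndev = ncsi_register_dev(netdev, foo_ncsi_handler);
 *	...
 *	ncsi_start_dev(priv->ndev);       (from ndo_open)
 *	ncsi_stop_dev(priv->ndev);        (from ndo_stop)
 *	ncsi_unregister_dev(priv->ndev);  (at driver remove time)
 */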
1600
1601 int ncsi_start_dev(struct ncsi_dev *nd)
1602 {
1603 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1604 int ret;
1605
1606 if (nd->state != ncsi_dev_state_registered &&
1607 nd->state != ncsi_dev_state_functional)
1608 return -ENOTTY;
1609
1610 if (!(ndp->flags & NCSI_DEV_PROBED)) {
1611 nd->state = ncsi_dev_state_probe;
1612 schedule_work(&ndp->work);
1613 return 0;
1614 }
1615
1616 if (ndp->flags & NCSI_DEV_HWA) {
1617 netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n");
1618 ret = ncsi_enable_hwa(ndp);
1619 } else {
1620 ret = ncsi_choose_active_channel(ndp);
1621 }
1622
1623 return ret;
1624 }
1625 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1626
1627 void ncsi_stop_dev(struct ncsi_dev *nd)
1628 {
1629 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1630 struct ncsi_package *np;
1631 struct ncsi_channel *nc;
1632 bool chained;
1633 int old_state;
1634 unsigned long flags;
1635
1636 /* Stop each channel's monitor and reset its state */
1637 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1638 NCSI_FOR_EACH_CHANNEL(np, nc) {
1639 ncsi_stop_channel_monitor(nc);
1640
1641 spin_lock_irqsave(&nc->lock, flags);
1642 chained = !list_empty(&nc->link);
1643 old_state = nc->state;
1644 nc->state = NCSI_CHANNEL_INACTIVE;
1645 spin_unlock_irqrestore(&nc->lock, flags);
1646
1647 WARN_ON_ONCE(chained ||
1648 old_state == NCSI_CHANNEL_INVISIBLE);
1649 }
1650 }
1651
1652 netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
1653 ncsi_report_link(ndp, true);
1654 }
1655 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1656
1657 void ncsi_unregister_dev(struct ncsi_dev *nd)
1658 {
1659 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1660 struct ncsi_package *np, *tmp;
1661 unsigned long flags;
1662
1663 dev_remove_pack(&ndp->ptype);
1664
1665 list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1666 ncsi_remove_package(np);
1667
1668 spin_lock_irqsave(&ncsi_dev_lock, flags);
1669 list_del_rcu(&ndp->node);
1670 #if IS_ENABLED(CONFIG_IPV6)
1671 if (list_empty(&ncsi_dev_list))
1672 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1673 #endif
1674 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1675
1676 kfree(ndp);
1677 }
1678 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);