net/ncsi/ncsi-manage.c
/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>

#include "internal.h"
#include "ncsi-pkt.h"

LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

static inline int ncsi_filter_size(int table)
{
	int sizes[] = { 2, 6, 6, 6 };

	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
		return -EINVAL;

	return sizes[table];
}

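/* Return a pointer to the entry at @index in the given filter table of
 * @nc, or NULL when the table hasn't been allocated or the table type is
 * invalid. No locking is done here; callers serialize access themselves.
 */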
static u32 *ncsi_get_filter(struct ncsi_channel *nc, int table, int index)
{
	struct ncsi_channel_filter *ncf;
	int size;

	ncf = nc->filters[table];
	if (!ncf)
		return NULL;

	size = ncsi_filter_size(table);
	if (size < 0)
		return NULL;

	return ncf->data + size * index;
}

/* Find the first active filter in a filter table that matches the given
 * data parameter. If data is NULL, this returns the first active filter.
 */
int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
{
	struct ncsi_channel_filter *ncf;
	void *bitmap;
	int index, size;
	unsigned long flags;

	ncf = nc->filters[table];
	if (!ncf)
		return -ENXIO;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	index = -1;
	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
	       < ncf->total) {
		if (!data || !memcmp(ncf->data + size * index, data, size)) {
			spin_unlock_irqrestore(&nc->lock, flags);
			return index;
		}
	}
	spin_unlock_irqrestore(&nc->lock, flags);

	return -ENOENT;
}

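/* Add an entry to a filter table. If an identical entry already exists,
 * its index is returned; otherwise the first free slot in the table's
 * bitmap is claimed and @data is copied into it. Returns -ENOSPC when
 * the table is full and -ENODEV when it hasn't been allocated.
 */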
int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
{
	struct ncsi_channel_filter *ncf;
	int index, size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	index = ncsi_find_filter(nc, table, data);
	if (index >= 0)
		return index;

	ncf = nc->filters[table];
	if (!ncf)
		return -ENODEV;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	do {
		index = find_next_zero_bit(bitmap, ncf->total, 0);
		if (index >= ncf->total) {
			spin_unlock_irqrestore(&nc->lock, flags);
			return -ENOSPC;
		}
	} while (test_and_set_bit(index, bitmap));

	memcpy(ncf->data + size * index, data, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return index;
}

int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
{
	struct ncsi_channel_filter *ncf;
	int size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	ncf = nc->filters[table];
	if (!ncf || index >= ncf->total)
		return -ENODEV;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	if (test_and_clear_bit(index, bitmap))
		memset(ncf->data + size * index, 0, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return 0;
}

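/* Report the aggregate link state to the consumer through the handler
 * registered in ncsi_register_dev(). The link is reported up if any
 * active channel (not currently queued for processing) has its link flag
 * set; with @force_down the link is reported down unconditionally.
 */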
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}

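/* Per-channel monitor, driven by a one-second timer while the channel is
 * inactive or active and not queued for processing. On the first tick and
 * on retries it transmits a Get Link Status (GLS) command; if the monitor
 * state advances past NCSI_CHANNEL_MONITOR_WAIT_MAX without being reset,
 * the channel is treated as timed out: the link is reported down (unless
 * hardware arbitration is in use) and the channel is re-queued so that it
 * gets reconfigured.
 */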
static void ncsi_channel_monitor(unsigned long data)
{
	struct ncsi_channel *nc = (struct ncsi_channel *)data;
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled || chained) {
		ncsi_stop_channel_monitor(nc);
		return;
	}
	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
		ncsi_stop_channel_monitor(nc);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		break;
	default:
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		if (!(ndp->flags & NCSI_DEV_HWA)) {
			ncsi_report_link(ndp, true);
			ndp->flags |= NCSI_DEV_RESHUFFLE;
		}

		ncsi_stop_channel_monitor(nc);

		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->monitor.timer);
}

struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}

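/* Allocate and register a channel with the given @id under package @np.
 * If a channel with that id already exists, the new allocation is dropped
 * and the existing channel is returned, so concurrent response handlers
 * racing on the same id end up with a single channel object.
 */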
struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->monitor.enabled = false;
	setup_timer(&nc->monitor.timer,
		    ncsi_channel_monitor, (unsigned long)nc);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}

static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	struct ncsi_channel_filter *ncf;
	unsigned long flags;
	int i;

	/* Release filters */
	spin_lock_irqsave(&nc->lock, flags);
	for (i = 0; i < NCSI_FILTER_MAX; i++) {
		ncf = nc->filters[i];
		if (!ncf)
			continue;

		nc->filters[i] = NULL;
		kfree(ncf);
	}

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}

struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}

struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);

	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}

void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}

void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}

/* Two consecutive NCSI commands shouldn't use the same packet ID.
 * Otherwise, a stale or bogus response could be matched to the wrong
 * command. So the available IDs are allocated in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
					unsigned int req_flags)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check for an available request slot from the cursor up to the ceiling */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

	/* Fall back to searching from the starting cursor */
	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

found:
	spin_unlock_irqrestore(&ndp->lock, flags);
	return nr;
}

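/* Return a request slot allocated by ncsi_alloc_request(). Any pending
 * timeout timer is cancelled and the command/response skbs are consumed.
 * For event-driven requests, completing the last outstanding command also
 * schedules the NCSI workqueue so the state machine can advance.
 */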
void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}

struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}

static void ncsi_request_timeout(unsigned long data)
{
	struct ncsi_request *nr = (struct ncsi_request *)data;
	struct ncsi_dev_priv *ndp = nr->ndp;
	unsigned long flags;

	/* If the request already has an associated response, let the
	 * response handler release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Release the request */
	ncsi_free_request(nr);
}

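/* Suspend state machine, run from the NCSI workqueue whenever the active
 * channel has to be torn down. It steps through Select Package (SP),
 * optionally Get Link Status (GLS) on every channel of the package when a
 * failover reshuffle is pending, Disable Channel Network TX (DCNT),
 * Disable Channel (DC) and Deselect Package (DP), then marks the channel
 * inactive and moves on to the next queued channel.
 */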
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		/* Fall through */
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* Retrieve the last link states of the channels in the
		 * current package when the active channel needs to fail
		 * over to another one, since we may pick a different
		 * channel as the next active one. Link state is the most
		 * important factor in that selection, so it has to be
		 * accurate. Unfortunately, the link states of inactive
		 * channels can't be updated by LSC AENs in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		ndp->pending_req_num = np->channel_num;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;

		nd->state = ncsi_dev_state_suspend_dcnt;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		ncsi_process_next_channel(ndp);

		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}

/* Check the VLAN filter bitmap for a set filter, and construct a
 * "Set VLAN Filter - Disable" packet if found.
 */
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
			 struct ncsi_cmd_arg *nca)
{
	int index;
	u32 *data;
	u16 vid;

	index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, NULL);
	if (index < 0) {
		/* Filter table empty */
		return -1;
	}

	data = ncsi_get_filter(nc, NCSI_FILTER_VLAN, index);
	if (!data) {
		netdev_err(ndp->ndev.dev,
			   "NCSI: failed to retrieve filter %d\n", index);
		/* Set the VLAN id to 0 - this will still disable the entry in
		 * the filter table, but we won't know what it was.
		 */
		vid = 0;
	} else {
		vid = *(u16 *)data;
	}

	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
		      "NCSI: removed vlan tag %u at index %d\n",
		      vid, index + 1);
	ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x00;
	return 0;
}

/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 * packet.
 */
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
		       struct ncsi_cmd_arg *nca)
{
	struct vlan_vid *vlan = NULL;
	int index = 0;

	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
		if (index < 0) {
			/* New tag to add */
			netdev_printk(KERN_DEBUG, ndp->ndev.dev,
				      "NCSI: new vlan id to set: %u\n",
				      vlan->vid);
			break;
		}
		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
			      "vid %u already at filter pos %d\n",
			      vlan->vid, index);
	}

	if (!vlan || index >= 0) {
		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
			      "no vlan ids left to set\n");
		return -1;
	}

	index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
	if (index < 0) {
		netdev_err(ndp->ndev.dev,
			   "Failed to add new VLAN tag, error %d\n", index);
		if (index == -ENOSPC)
			netdev_err(ndp->ndev.dev,
				   "Channel %u already has all VLAN filters set\n",
				   nc->id);
		return -1;
	}

	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
		      "NCSI: set vid %u in packet, index %u\n",
		      vlan->vid, index + 1);
	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vlan->vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x01;

	return 0;
}

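/* Configuration state machine, run from the NCSI workqueue to bring the
 * active channel up: Select Package (SP), Clear Initial State (CIS),
 * clear stale VLAN filters and program the registered VLAN IDs (SVF),
 * enable or disable the VLAN filter (EV/DV), set the MAC address (SMA),
 * enable broadcast filtering (EBF), optionally enable global multicast
 * filtering (EGMF) when IPv6 is in use, enable channel network TX (ECNT),
 * enable the channel (EC), enable AEN (AE) if supported, and finally get
 * the link status (GLS) before marking the channel active.
 */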
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_SP\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_CIS\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_clear_vids;
		break;
	case ncsi_dev_state_config_clear_vids:
	case ncsi_dev_state_config_svf:
	case ncsi_dev_state_config_ev:
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
#if IS_ENABLED(CONFIG_IPV6)
	case ncsi_dev_state_config_egmf:
#endif
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Clear any active filters on the channel before setting */
		if (nd->state == ncsi_dev_state_config_clear_vids) {
			ret = clear_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_svf;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_clear_vids;
		/* Add known VLAN tags to the filter */
		} else if (nd->state == ncsi_dev_state_config_svf) {
			ret = set_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_ev;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_svf;
		/* Enable/Disable the VLAN filter */
		} else if (nd->state == ncsi_dev_state_config_ev) {
			if (list_empty(&ndp->vlan_vids)) {
				nca.type = NCSI_PKT_CMD_DV;
			} else {
				nca.type = NCSI_PKT_CMD_EV;
				nca.bytes[3] = NCSI_CAP_VLAN_NO;
			}
			nd->state = ncsi_dev_state_config_sma;
		} else if (nd->state == ncsi_dev_state_config_sma) {
			/* Use first entry in unicast filter table. Note that
			 * the MAC filter table starts from entry 1 instead of
			 * 0.
			 */
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[6] = 0x1;
			nca.bytes[7] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#if IS_ENABLED(CONFIG_IPV6)
			if (ndp->inet6_addr_num > 0 &&
			    (nc->caps[NCSI_CAP_GENERIC].cap &
			     NCSI_CAP_GENERIC_MC))
				nd->state = ncsi_dev_state_config_egmf;
			else
				nd->state = ncsi_dev_state_config_ecnt;
		} else if (nd->state == ncsi_dev_state_config_egmf) {
			nca.type = NCSI_PKT_CMD_EGMF;
			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#endif /* CONFIG_IPV6 */
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD %x\n",
				   nca.type);
			goto error;
		}
		break;
	case ncsi_dev_state_config_done:
		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
			      "NCSI: channel %u config done\n", nc->id);
		spin_lock_irqsave(&nc->lock, flags);
		if (nc->reconfigure_needed) {
			/* This channel's configuration has been updated
			 * part-way during the config state - start the
			 * channel configuration over
			 */
			nc->reconfigure_needed = false;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_printk(KERN_DEBUG, dev,
				      "Dirty NCSI channel state reset\n");
			ncsi_process_next_channel(ndp);
			break;
		}

		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
			nc->state = NCSI_CHANNEL_ACTIVE;
		} else {
			hot_nc = NULL;
			nc->state = NCSI_CHANNEL_INACTIVE;
			netdev_warn(ndp->ndev.dev,
				    "NCSI: channel %u link down after config\n",
				    nc->id);
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
			     nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}

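/* Pick the next channel to bring up. Preference is given to the previous
 * "hot" channel and, above all, to the first inactive channel that
 * reports link up; the chosen channel is appended to the processing queue
 * and the state machine is kicked via ncsi_process_next_channel().
 */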
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* The search stops as soon as an inactive channel with link up
	 * is found.
	 */
	found = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (!found)
				found = nc;

			if (nc == hot_nc)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				spin_unlock_irqrestore(&nc->lock, flags);
				found = nc;
				goto out;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

	if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found with link\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	ncm = &found->modes[NCSI_MODE_LINK];
	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
		      "NCSI: Channel %u added to queue (link %s)\n",
		      found->id, ncm->data[2] & 0x1 ? "up" : "down");

out:
	spin_lock_irqsave(&ndp->lock, flags);
	list_add_tail_rcu(&found->link, &ndp->channel_queue);
	spin_unlock_irqrestore(&ndp->lock, flags);

	return ncsi_process_next_channel(ndp);
}

static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;
	bool has_channel = false;

	/* Hardware arbitration is disabled if any single channel doesn't
	 * explicitly support it.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			has_channel = true;

			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	if (has_channel) {
		ndp->flags |= NCSI_DEV_HWA;
		return true;
	}

	ndp->flags &= ~NCSI_DEV_HWA;
	return false;
}

static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* Move all available channels to processing queue */
	spin_lock_irqsave(&ndp->lock, flags);
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
				     !list_empty(&nc->link));
			ncsi_stop_channel_monitor(nc);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		}
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* In the extreme case we may end up with no channels at all */
	if (list_empty(&ndp->channel_queue)) {
		netdev_err(ndp->ndev.dev,
			   "NCSI: No available channels for HWA\n");
		ncsi_report_link(ndp, false);
		return -ENOENT;
	}

	return ncsi_process_next_channel(ndp);
}

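/* Probe state machine, run from the NCSI workqueue the first time the
 * interface is started. It deselects all eight possible packages, selects
 * and deselects each one to discover which packages respond, and for every
 * discovered package clears the initial state of its channels and queries
 * version (GVI), capabilities (GC) and link status (GLS) before
 * deselecting it again. Once enumeration completes, either hardware
 * arbitration is enabled or a single active channel is chosen.
 */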
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		/* Fall through */
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		ndp->pending_req_num = 16;

		/* Select all possible packages */
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		/* Disable all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		if (!ndp->active_package)
			ndp->active_package = list_first_or_null_rcu(
				&ndp->packages, struct ncsi_package, node);
		else if (list_is_last(&ndp->active_package->node,
				      &ndp->packages))
			ndp->active_package = NULL;
		else
			ndp->active_package = list_next_entry(
				ndp->active_package, node);

		/* All available packages and channels have been enumerated.
		 * The enumeration only happens once, when the NCSI interface
		 * is started, so we need to continue bringing up the
		 * interface after it completes.
		 *
		 * An active channel has to be chosen before it can be
		 * configured. Note that in extreme situations there may be
		 * no channel to activate at all.
		 */
		if (!ndp->active_package) {
			ndp->flags |= NCSI_DEV_PROBED;
			if (ncsi_check_hwa(ndp))
				ncsi_enable_hwa(ndp);
			else
				ncsi_choose_active_channel(ndp);
			return;
		}

		/* Select the active package */
		ndp->pending_req_num = 1;
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_cis:
		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the active package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Scan channels in next package */
		nd->state = ncsi_dev_state_probe_channel;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}

static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
						 struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}

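/* Pop the next channel off the processing queue and make it the active
 * channel. A channel that was inactive is configured; one that was active
 * is suspended. When the queue is empty, either a pending reshuffle is
 * handled or the aggregate link state is reported.
 */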
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			    nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
			    nc->id);
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
		      "NCSI: No more channels to process\n");
	ncsi_report_link(ndp, false);
	return -ENODEV;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ncsi_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *dev = ifa->idev->dev;
	struct ncsi_dev *nd = ncsi_find_dev(dev);
	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	bool action;
	int ret;

	if (!ndp || (ipv6_addr_type(&ifa->addr) &
	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UP:
		action = (++ndp->inet6_addr_num) == 1;
		nca.type = NCSI_PKT_CMD_EGMF;
		break;
	case NETDEV_DOWN:
		action = (--ndp->inet6_addr_num == 0);
		nca.type = NCSI_PKT_CMD_DGMF;
		break;
	default:
		return NOTIFY_OK;
	}

	/* We might not have an active channel or package yet. The
	 * multicast filtering required for IPv6 will be enabled once an
	 * active channel and package have been chosen.
	 */
	np = ndp->active_package;
	nc = ndp->active_channel;
	if (!action || !np || !nc)
		return NOTIFY_OK;

	/* We needn't enable or disable it if the function isn't supported */
	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
		return NOTIFY_OK;

	nca.ndp = ndp;
	nca.req_flags = 0;
	nca.package = np->id;
	nca.channel = nc->id;
	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
	ret = ncsi_xmit_cmd(&nca);
	if (ret) {
		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
			    (event == NETDEV_UP) ? "enable" : "disable", ret);
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block ncsi_inet6addr_notifier = {
	.notifier_call = ncsi_inet6addr_event,
};
#endif /* CONFIG_IPV6 */

static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, so mark them dirty instead
			 * of kicking if:
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) its ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
				    ncsi_dev_state_config ||
				    !list_empty(&nc->link)) {
					netdev_printk(KERN_DEBUG, nd->dev,
						      "NCSI: channel %p marked dirty\n",
						      nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_printk(KERN_DEBUG, nd->dev,
				      "NCSI: kicked channel %p\n", nc);
			n++;
		}
	}

	return n;
}

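/* Record a VLAN ID for the NCSI-managed interface (typically called from
 * the driver's .ndo_vlan_rx_add_vid hook). The ID is stored in the
 * driver-private list, bounded by NCSI_MAX_VLAN_VIDS, and the active
 * channels are kicked back through configuration so the new filter gets
 * programmed into the hardware.
 */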
int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ncsi_dev_priv *ndp;
	unsigned int n_vids = 0;
	struct vlan_vid *vlan;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: No net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Add the VLAN id to our internal list */
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		n_vids++;
		if (vlan->vid == vid) {
			netdev_printk(KERN_DEBUG, dev,
				      "NCSI: vid %u already registered\n", vid);
			return 0;
		}
	}
	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
		netdev_warn(dev,
			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
			    vid, NCSI_MAX_VLAN_VIDS);
		return -ENOSPC;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->proto = proto;
	vlan->vid = vid;
	list_add_rcu(&vlan->list, &ndp->vlan_vids);

	netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);

int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_printk(KERN_DEBUG, dev,
				      "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);

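/* Register a net_device as NCSI-managed and return the associated
 * struct ncsi_dev. A rough usage sketch for a network driver (the exact
 * call sites depend on the driver; the handler name below is just an
 * example):
 *
 *	nd = ncsi_register_dev(netdev, my_ncsi_handler);
 *	...
 *	ncsi_start_dev(nd);	// e.g. from ndo_open, kicks off probing
 *	...
 *	ncsi_stop_dev(nd);	// e.g. from ndo_stop
 *	ncsi_unregister_dev(nd);
 *
 * The handler is invoked from ncsi_report_link() with nd->link_up updated
 * to the reported state.
 */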
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		setup_timer(&ndp->requests[i].timer,
			    ncsi_request_timeout,
			    (unsigned long)&ndp->requests[i]);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
#if IS_ENABLED(CONFIG_IPV6)
	ndp->inet6_addr_num = 0;
	if (list_empty(&ncsi_dev_list))
		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	int ret;

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	if (ndp->flags & NCSI_DEV_HWA) {
		netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n");
		ret = ncsi_enable_hwa(ndp);
	} else {
		ret = ncsi_choose_active_channel(ndp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

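/* Stop an NCSI device: every channel monitor is cancelled and every
 * channel is forced back to the inactive state, then the link is reported
 * down to the consumer. The device remains registered and can be started
 * again with ncsi_start_dev().
 */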
void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor and reset channel's state */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);

void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
#if IS_ENABLED(CONFIG_IPV6)
	if (list_empty(&ncsi_dev_list))
		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);