net/ncsi: Rework request index allocation
net/ncsi/ncsi-manage.c
1 /*
2 * Copyright Gavin Shan, IBM Corporation 2016.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
16
17 #include <net/ncsi.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/addrconf.h>
21 #include <net/ipv6.h>
22 #include <net/if_inet6.h>
23
24 #include "internal.h"
25 #include "ncsi-pkt.h"
26
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
29
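/* Per-entry size, in bytes, of each filter table. The 2-byte entries
 * hold VLAN IDs and the 6-byte entries hold MAC addresses.
 */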
30 static inline int ncsi_filter_size(int table)
31 {
32 int sizes[] = { 2, 6, 6, 6 };
33
34 BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
35 if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
36 return -EINVAL;
37
38 return sizes[table];
39 }
40
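/* Search the given filter table for an entry matching @data and return
 * its index, or a negative errno if the table is missing or the entry
 * isn't present.
 */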
41 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
42 {
43 struct ncsi_channel_filter *ncf;
44 void *bitmap;
45 int index, size;
46 unsigned long flags;
47
48 ncf = nc->filters[table];
49 if (!ncf)
50 return -ENXIO;
51
52 size = ncsi_filter_size(table);
53 if (size < 0)
54 return size;
55
56 spin_lock_irqsave(&nc->lock, flags);
57 bitmap = (void *)&ncf->bitmap;
58 index = -1;
59 while ((index = find_next_bit(bitmap, ncf->total, index + 1))
60 < ncf->total) {
61 if (!memcmp(ncf->data + size * index, data, size)) {
62 spin_unlock_irqrestore(&nc->lock, flags);
63 return index;
64 }
65 }
66 spin_unlock_irqrestore(&nc->lock, flags);
67
68 return -ENOENT;
69 }
70
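/* Add @data to the filter table. If it is already present, the existing
 * index is returned. Otherwise a free slot is claimed from the bitmap,
 * the data is copied in and the new index returned; -ENOSPC indicates
 * the table is full.
 */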
71 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
72 {
73 struct ncsi_channel_filter *ncf;
74 int index, size;
75 void *bitmap;
76 unsigned long flags;
77
78 size = ncsi_filter_size(table);
79 if (size < 0)
80 return size;
81
82 index = ncsi_find_filter(nc, table, data);
83 if (index >= 0)
84 return index;
85
86 ncf = nc->filters[table];
87 if (!ncf)
88 return -ENODEV;
89
90 spin_lock_irqsave(&nc->lock, flags);
91 bitmap = (void *)&ncf->bitmap;
92 do {
93 index = find_next_zero_bit(bitmap, ncf->total, 0);
94 if (index >= ncf->total) {
95 spin_unlock_irqrestore(&nc->lock, flags);
96 return -ENOSPC;
97 }
98 } while (test_and_set_bit(index, bitmap));
99
100 memcpy(ncf->data + size * index, data, size);
101 spin_unlock_irqrestore(&nc->lock, flags);
102
103 return index;
104 }
105
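/* Drop the entry at @index from the filter table by clearing its bitmap
 * bit and zeroing the stored data.
 */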
106 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
107 {
108 struct ncsi_channel_filter *ncf;
109 int size;
110 void *bitmap;
111 unsigned long flags;
112
113 size = ncsi_filter_size(table);
114 if (size < 0)
115 return size;
116
117 ncf = nc->filters[table];
118 if (!ncf || index >= ncf->total)
119 return -ENODEV;
120
121 spin_lock_irqsave(&nc->lock, flags);
122 bitmap = (void *)&ncf->bitmap;
123 if (test_and_clear_bit(index, bitmap))
124 memset(ncf->data + size * index, 0, size);
125 spin_unlock_irqrestore(&nc->lock, flags);
126
127 return 0;
128 }
129
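/* Report the link state to the consumer through nd->handler. Unless
 * @force_down is set, the link is considered up if any active channel
 * reports its link as up.
 */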
130 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
131 {
132 struct ncsi_dev *nd = &ndp->ndev;
133 struct ncsi_package *np;
134 struct ncsi_channel *nc;
135 unsigned long flags;
136
137 nd->state = ncsi_dev_state_functional;
138 if (force_down) {
139 nd->link_up = 0;
140 goto report;
141 }
142
143 nd->link_up = 0;
144 NCSI_FOR_EACH_PACKAGE(ndp, np) {
145 NCSI_FOR_EACH_CHANNEL(np, nc) {
146 spin_lock_irqsave(&nc->lock, flags);
147
148 if (!list_empty(&nc->link) ||
149 nc->state != NCSI_CHANNEL_ACTIVE) {
150 spin_unlock_irqrestore(&nc->lock, flags);
151 continue;
152 }
153
154 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
155 spin_unlock_irqrestore(&nc->lock, flags);
156 nd->link_up = 1;
157 goto report;
158 }
159
160 spin_unlock_irqrestore(&nc->lock, flags);
161 }
162 }
163
164 report:
165 nd->handler(nd);
166 }
167
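/* Timer handler monitoring an active or inactive channel. A Get Link
 * Status (GLS) command is sent on every other expiry; once roughly
 * three periods pass without a response the channel is marked inactive
 * and requeued for reconfiguration, and a link failure is reported for
 * active channels unless hardware arbitration is in use.
 */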
168 static void ncsi_channel_monitor(unsigned long data)
169 {
170 struct ncsi_channel *nc = (struct ncsi_channel *)data;
171 struct ncsi_package *np = nc->package;
172 struct ncsi_dev_priv *ndp = np->ndp;
173 struct ncsi_cmd_arg nca;
174 bool enabled, chained;
175 unsigned int timeout;
176 unsigned long flags;
177 int state, ret;
178
179 spin_lock_irqsave(&nc->lock, flags);
180 state = nc->state;
181 chained = !list_empty(&nc->link);
182 timeout = nc->timeout;
183 enabled = nc->enabled;
184 spin_unlock_irqrestore(&nc->lock, flags);
185
186 if (!enabled || chained)
187 return;
188 if (state != NCSI_CHANNEL_INACTIVE &&
189 state != NCSI_CHANNEL_ACTIVE)
190 return;
191
192 if (!(timeout % 2)) {
193 nca.ndp = ndp;
194 nca.package = np->id;
195 nca.channel = nc->id;
196 nca.type = NCSI_PKT_CMD_GLS;
197 nca.driven = false;
198 ret = ncsi_xmit_cmd(&nca);
199 if (ret) {
200 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
201 ret);
202 return;
203 }
204 }
205
206 if (timeout + 1 >= 3) {
207 if (!(ndp->flags & NCSI_DEV_HWA) &&
208 state == NCSI_CHANNEL_ACTIVE)
209 ncsi_report_link(ndp, true);
210
211 spin_lock_irqsave(&nc->lock, flags);
212 nc->state = NCSI_CHANNEL_INVISIBLE;
213 spin_unlock_irqrestore(&nc->lock, flags);
214
215 spin_lock_irqsave(&ndp->lock, flags);
216 nc->state = NCSI_CHANNEL_INACTIVE;
217 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
218 spin_unlock_irqrestore(&ndp->lock, flags);
219 ncsi_process_next_channel(ndp);
220 return;
221 }
222
223 spin_lock_irqsave(&nc->lock, flags);
224 nc->timeout = timeout + 1;
225 nc->enabled = true;
226 spin_unlock_irqrestore(&nc->lock, flags);
227 mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
228 }
229
230 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
231 {
232 unsigned long flags;
233
234 spin_lock_irqsave(&nc->lock, flags);
235 WARN_ON_ONCE(nc->enabled);
236 nc->timeout = 0;
237 nc->enabled = true;
238 spin_unlock_irqrestore(&nc->lock, flags);
239
240 mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
241 }
242
243 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
244 {
245 unsigned long flags;
246
247 spin_lock_irqsave(&nc->lock, flags);
248 if (!nc->enabled) {
249 spin_unlock_irqrestore(&nc->lock, flags);
250 return;
251 }
252 nc->enabled = false;
253 spin_unlock_irqrestore(&nc->lock, flags);
254
255 del_timer_sync(&nc->timer);
256 }
257
258 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
259 unsigned char id)
260 {
261 struct ncsi_channel *nc;
262
263 NCSI_FOR_EACH_CHANNEL(np, nc) {
264 if (nc->id == id)
265 return nc;
266 }
267
268 return NULL;
269 }
270
271 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
272 {
273 struct ncsi_channel *nc, *tmp;
274 int index;
275 unsigned long flags;
276
277 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
278 if (!nc)
279 return NULL;
280
281 nc->id = id;
282 nc->package = np;
283 nc->state = NCSI_CHANNEL_INACTIVE;
284 nc->enabled = false;
285 setup_timer(&nc->timer, ncsi_channel_monitor, (unsigned long)nc);
286 spin_lock_init(&nc->lock);
287 INIT_LIST_HEAD(&nc->link);
288 for (index = 0; index < NCSI_CAP_MAX; index++)
289 nc->caps[index].index = index;
290 for (index = 0; index < NCSI_MODE_MAX; index++)
291 nc->modes[index].index = index;
292
293 spin_lock_irqsave(&np->lock, flags);
294 tmp = ncsi_find_channel(np, id);
295 if (tmp) {
296 spin_unlock_irqrestore(&np->lock, flags);
297 kfree(nc);
298 return tmp;
299 }
300
301 list_add_tail_rcu(&nc->node, &np->channels);
302 np->channel_num++;
303 spin_unlock_irqrestore(&np->lock, flags);
304
305 return nc;
306 }
307
308 static void ncsi_remove_channel(struct ncsi_channel *nc)
309 {
310 struct ncsi_package *np = nc->package;
311 struct ncsi_channel_filter *ncf;
312 unsigned long flags;
313 int i;
314
315 /* Release filters */
316 spin_lock_irqsave(&nc->lock, flags);
317 for (i = 0; i < NCSI_FILTER_MAX; i++) {
318 ncf = nc->filters[i];
319 if (!ncf)
320 continue;
321
322 nc->filters[i] = NULL;
323 kfree(ncf);
324 }
325
326 nc->state = NCSI_CHANNEL_INACTIVE;
327 spin_unlock_irqrestore(&nc->lock, flags);
328 ncsi_stop_channel_monitor(nc);
329
330 /* Remove and free channel */
331 spin_lock_irqsave(&np->lock, flags);
332 list_del_rcu(&nc->node);
333 np->channel_num--;
334 spin_unlock_irqrestore(&np->lock, flags);
335
336 kfree(nc);
337 }
338
339 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
340 unsigned char id)
341 {
342 struct ncsi_package *np;
343
344 NCSI_FOR_EACH_PACKAGE(ndp, np) {
345 if (np->id == id)
346 return np;
347 }
348
349 return NULL;
350 }
351
352 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
353 unsigned char id)
354 {
355 struct ncsi_package *np, *tmp;
356 unsigned long flags;
357
358 np = kzalloc(sizeof(*np), GFP_ATOMIC);
359 if (!np)
360 return NULL;
361
362 np->id = id;
363 np->ndp = ndp;
364 spin_lock_init(&np->lock);
365 INIT_LIST_HEAD(&np->channels);
366
367 spin_lock_irqsave(&ndp->lock, flags);
368 tmp = ncsi_find_package(ndp, id);
369 if (tmp) {
370 spin_unlock_irqrestore(&ndp->lock, flags);
371 kfree(np);
372 return tmp;
373 }
374
375 list_add_tail_rcu(&np->node, &ndp->packages);
376 ndp->package_num++;
377 spin_unlock_irqrestore(&ndp->lock, flags);
378
379 return np;
380 }
381
382 void ncsi_remove_package(struct ncsi_package *np)
383 {
384 struct ncsi_dev_priv *ndp = np->ndp;
385 struct ncsi_channel *nc, *tmp;
386 unsigned long flags;
387
388 /* Release all child channels */
389 list_for_each_entry_safe(nc, tmp, &np->channels, node)
390 ncsi_remove_channel(nc);
391
392 /* Remove and free package */
393 spin_lock_irqsave(&ndp->lock, flags);
394 list_del_rcu(&np->node);
395 ndp->package_num--;
396 spin_unlock_irqrestore(&ndp->lock, flags);
397
398 kfree(np);
399 }
400
401 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
402 unsigned char id,
403 struct ncsi_package **np,
404 struct ncsi_channel **nc)
405 {
406 struct ncsi_package *p;
407 struct ncsi_channel *c;
408
409 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
410 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
411
412 if (np)
413 *np = p;
414 if (nc)
415 *nc = c;
416 }
417
418 /* For two consecutive NCSI commands, the packet IDs shouldn't be the
419 * same; otherwise a stale response could be matched to the wrong
420 * request. So the available IDs are allocated in round-robin fashion.
421 */
422 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, bool driven)
423 {
424 struct ncsi_request *nr = NULL;
425 int i, limit = ARRAY_SIZE(ndp->requests);
426 unsigned long flags;
427
428 /* Check if there is an available request up to the ceiling */
429 spin_lock_irqsave(&ndp->lock, flags);
430 for (i = ndp->request_id; i < limit; i++) {
431 if (ndp->requests[i].used)
432 continue;
433
434 nr = &ndp->requests[i];
435 nr->used = true;
436 nr->driven = driven;
437 ndp->request_id = i + 1;
438 goto found;
439 }
440
441 /* Fall back to checking from the starting cursor */
442 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
443 if (ndp->requests[i].used)
444 continue;
445
446 nr = &ndp->requests[i];
447 nr->used = true;
448 nr->driven = driven;
449 ndp->request_id = i + 1;
450 goto found;
451 }
452
453 found:
454 spin_unlock_irqrestore(&ndp->lock, flags);
455 return nr;
456 }
457
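/* Return a request to the pool: stop its timeout timer and release the
 * command and response skbs. For requests driven by the state machine,
 * the pending request counter is decremented and the work item is
 * scheduled once it reaches zero.
 */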
458 void ncsi_free_request(struct ncsi_request *nr)
459 {
460 struct ncsi_dev_priv *ndp = nr->ndp;
461 struct sk_buff *cmd, *rsp;
462 unsigned long flags;
463 bool driven;
464
465 if (nr->enabled) {
466 nr->enabled = false;
467 del_timer_sync(&nr->timer);
468 }
469
470 spin_lock_irqsave(&ndp->lock, flags);
471 cmd = nr->cmd;
472 rsp = nr->rsp;
473 nr->cmd = NULL;
474 nr->rsp = NULL;
475 nr->used = false;
476 driven = nr->driven;
477 spin_unlock_irqrestore(&ndp->lock, flags);
478
479 if (driven && cmd && --ndp->pending_req_num == 0)
480 schedule_work(&ndp->work);
481
482 /* Release command and response */
483 consume_skb(cmd);
484 consume_skb(rsp);
485 }
486
487 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
488 {
489 struct ncsi_dev_priv *ndp;
490
491 NCSI_FOR_EACH_DEV(ndp) {
492 if (ndp->ndev.dev == dev)
493 return &ndp->ndev;
494 }
495
496 return NULL;
497 }
498
499 static void ncsi_request_timeout(unsigned long data)
500 {
501 struct ncsi_request *nr = (struct ncsi_request *)data;
502 struct ncsi_dev_priv *ndp = nr->ndp;
503 unsigned long flags;
504
505 /* If the request already has an associated response,
506 * let the response handler release it.
507 */
508 spin_lock_irqsave(&ndp->lock, flags);
509 nr->enabled = false;
510 if (nr->rsp || !nr->cmd) {
511 spin_unlock_irqrestore(&ndp->lock, flags);
512 return;
513 }
514 spin_unlock_irqrestore(&ndp->lock, flags);
515
516 /* Release the request */
517 ncsi_free_request(nr);
518 }
519
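/* State machine taking the active channel down. One command is sent per
 * invocation: select the package, disable the channel's network TX,
 * disable the channel, then deselect the package, before the channel is
 * finally marked inactive.
 */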
520 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
521 {
522 struct ncsi_dev *nd = &ndp->ndev;
523 struct ncsi_package *np = ndp->active_package;
524 struct ncsi_channel *nc = ndp->active_channel;
525 struct ncsi_cmd_arg nca;
526 unsigned long flags;
527 int ret;
528
529 nca.ndp = ndp;
530 nca.driven = true;
531 switch (nd->state) {
532 case ncsi_dev_state_suspend:
533 nd->state = ncsi_dev_state_suspend_select;
534 /* Fall through */
535 case ncsi_dev_state_suspend_select:
536 case ncsi_dev_state_suspend_dcnt:
537 case ncsi_dev_state_suspend_dc:
538 case ncsi_dev_state_suspend_deselect:
539 ndp->pending_req_num = 1;
540
541 np = ndp->active_package;
542 nc = ndp->active_channel;
543 nca.package = np->id;
544 if (nd->state == ncsi_dev_state_suspend_select) {
545 nca.type = NCSI_PKT_CMD_SP;
546 nca.channel = NCSI_RESERVED_CHANNEL;
547 if (ndp->flags & NCSI_DEV_HWA)
548 nca.bytes[0] = 0;
549 else
550 nca.bytes[0] = 1;
551 nd->state = ncsi_dev_state_suspend_dcnt;
552 } else if (nd->state == ncsi_dev_state_suspend_dcnt) {
553 nca.type = NCSI_PKT_CMD_DCNT;
554 nca.channel = nc->id;
555 nd->state = ncsi_dev_state_suspend_dc;
556 } else if (nd->state == ncsi_dev_state_suspend_dc) {
557 nca.type = NCSI_PKT_CMD_DC;
558 nca.channel = nc->id;
559 nca.bytes[0] = 1;
560 nd->state = ncsi_dev_state_suspend_deselect;
561 } else if (nd->state == ncsi_dev_state_suspend_deselect) {
562 nca.type = NCSI_PKT_CMD_DP;
563 nca.channel = NCSI_RESERVED_CHANNEL;
564 nd->state = ncsi_dev_state_suspend_done;
565 }
566
567 ret = ncsi_xmit_cmd(&nca);
568 if (ret) {
569 nd->state = ncsi_dev_state_functional;
570 return;
571 }
572
573 break;
574 case ncsi_dev_state_suspend_done:
575 spin_lock_irqsave(&nc->lock, flags);
576 nc->state = NCSI_CHANNEL_INACTIVE;
577 spin_unlock_irqrestore(&nc->lock, flags);
578 ncsi_process_next_channel(ndp);
579
580 break;
581 default:
582 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
583 nd->state);
584 }
585 }
586
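/* State machine bringing the active channel up. One command is sent per
 * invocation: select the package, clear the channel's initial state,
 * set the MAC address, enable the broadcast (and, with IPv6, global
 * multicast) filter, enable network TX, enable the channel, configure
 * AEN if supported, and finally query the link status.
 */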
587 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
588 {
589 struct ncsi_dev *nd = &ndp->ndev;
590 struct net_device *dev = nd->dev;
591 struct ncsi_package *np = ndp->active_package;
592 struct ncsi_channel *nc = ndp->active_channel;
593 struct ncsi_cmd_arg nca;
594 unsigned char index;
595 unsigned long flags;
596 int ret;
597
598 nca.ndp = ndp;
599 nca.driven = true;
600 switch (nd->state) {
601 case ncsi_dev_state_config:
602 case ncsi_dev_state_config_sp:
603 ndp->pending_req_num = 1;
604
605 /* Select the specific package */
606 nca.type = NCSI_PKT_CMD_SP;
607 if (ndp->flags & NCSI_DEV_HWA)
608 nca.bytes[0] = 0;
609 else
610 nca.bytes[0] = 1;
611 nca.package = np->id;
612 nca.channel = NCSI_RESERVED_CHANNEL;
613 ret = ncsi_xmit_cmd(&nca);
614 if (ret)
615 goto error;
616
617 nd->state = ncsi_dev_state_config_cis;
618 break;
619 case ncsi_dev_state_config_cis:
620 ndp->pending_req_num = 1;
621
622 /* Clear initial state */
623 nca.type = NCSI_PKT_CMD_CIS;
624 nca.package = np->id;
625 nca.channel = nc->id;
626 ret = ncsi_xmit_cmd(&nca);
627 if (ret)
628 goto error;
629
630 nd->state = ncsi_dev_state_config_sma;
631 break;
632 case ncsi_dev_state_config_sma:
633 case ncsi_dev_state_config_ebf:
634 #if IS_ENABLED(CONFIG_IPV6)
635 case ncsi_dev_state_config_egmf:
636 #endif
637 case ncsi_dev_state_config_ecnt:
638 case ncsi_dev_state_config_ec:
639 case ncsi_dev_state_config_ae:
640 case ncsi_dev_state_config_gls:
641 ndp->pending_req_num = 1;
642
643 nca.package = np->id;
644 nca.channel = nc->id;
645
646 /* Use the first entry in the unicast filter table. Note that
647 * the MAC filter table starts from entry 1 instead of
648 * 0.
649 */
650 if (nd->state == ncsi_dev_state_config_sma) {
651 nca.type = NCSI_PKT_CMD_SMA;
652 for (index = 0; index < 6; index++)
653 nca.bytes[index] = dev->dev_addr[index];
654 nca.bytes[6] = 0x1;
655 nca.bytes[7] = 0x1;
656 nd->state = ncsi_dev_state_config_ebf;
657 } else if (nd->state == ncsi_dev_state_config_ebf) {
658 nca.type = NCSI_PKT_CMD_EBF;
659 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
660 nd->state = ncsi_dev_state_config_ecnt;
661 #if IS_ENABLED(CONFIG_IPV6)
662 if (ndp->inet6_addr_num > 0 &&
663 (nc->caps[NCSI_CAP_GENERIC].cap &
664 NCSI_CAP_GENERIC_MC))
665 nd->state = ncsi_dev_state_config_egmf;
666 else
667 nd->state = ncsi_dev_state_config_ecnt;
668 } else if (nd->state == ncsi_dev_state_config_egmf) {
669 nca.type = NCSI_PKT_CMD_EGMF;
670 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
671 nd->state = ncsi_dev_state_config_ecnt;
672 #endif /* CONFIG_IPV6 */
673 } else if (nd->state == ncsi_dev_state_config_ecnt) {
674 nca.type = NCSI_PKT_CMD_ECNT;
675 nd->state = ncsi_dev_state_config_ec;
676 } else if (nd->state == ncsi_dev_state_config_ec) {
677 /* Enable AEN if it's supported */
678 nca.type = NCSI_PKT_CMD_EC;
679 nd->state = ncsi_dev_state_config_ae;
680 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
681 nd->state = ncsi_dev_state_config_gls;
682 } else if (nd->state == ncsi_dev_state_config_ae) {
683 nca.type = NCSI_PKT_CMD_AE;
684 nca.bytes[0] = 0;
685 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
686 nd->state = ncsi_dev_state_config_gls;
687 } else if (nd->state == ncsi_dev_state_config_gls) {
688 nca.type = NCSI_PKT_CMD_GLS;
689 nd->state = ncsi_dev_state_config_done;
690 }
691
692 ret = ncsi_xmit_cmd(&nca);
693 if (ret)
694 goto error;
695 break;
696 case ncsi_dev_state_config_done:
697 spin_lock_irqsave(&nc->lock, flags);
698 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
699 nc->state = NCSI_CHANNEL_ACTIVE;
700 else
701 nc->state = NCSI_CHANNEL_INACTIVE;
702 spin_unlock_irqrestore(&nc->lock, flags);
703
704 ncsi_start_channel_monitor(nc);
705 ncsi_process_next_channel(ndp);
706 break;
707 default:
708 netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
709 nd->state);
710 }
711
712 return;
713
714 error:
715 ncsi_report_link(ndp, true);
716 }
717
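/* Pick the channel to bring up next: prefer an inactive channel that
 * already reports link up, fall back to any inactive channel, and queue
 * the choice for configuration.
 */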
718 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
719 {
720 struct ncsi_package *np;
721 struct ncsi_channel *nc, *found;
722 struct ncsi_channel_mode *ncm;
723 unsigned long flags;
724
725 /* The search stops once an inactive channel whose link
726 * is up is found.
727 */
728 found = NULL;
729 NCSI_FOR_EACH_PACKAGE(ndp, np) {
730 NCSI_FOR_EACH_CHANNEL(np, nc) {
731 spin_lock_irqsave(&nc->lock, flags);
732
733 if (!list_empty(&nc->link) ||
734 nc->state != NCSI_CHANNEL_INACTIVE) {
735 spin_unlock_irqrestore(&nc->lock, flags);
736 continue;
737 }
738
739 if (!found)
740 found = nc;
741
742 ncm = &nc->modes[NCSI_MODE_LINK];
743 if (ncm->data[2] & 0x1) {
744 spin_unlock_irqrestore(&nc->lock, flags);
745 found = nc;
746 goto out;
747 }
748
749 spin_unlock_irqrestore(&nc->lock, flags);
750 }
751 }
752
753 if (!found) {
754 ncsi_report_link(ndp, true);
755 return -ENODEV;
756 }
757
758 out:
759 spin_lock_irqsave(&ndp->lock, flags);
760 list_add_tail_rcu(&found->link, &ndp->channel_queue);
761 spin_unlock_irqrestore(&ndp->lock, flags);
762
763 return ncsi_process_next_channel(ndp);
764 }
765
766 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
767 {
768 struct ncsi_package *np;
769 struct ncsi_channel *nc;
770 unsigned int cap;
771
772 /* Hardware arbitration is disabled if any one channel
773 * doesn't explicitly support it.
774 */
775 NCSI_FOR_EACH_PACKAGE(ndp, np) {
776 NCSI_FOR_EACH_CHANNEL(np, nc) {
777 cap = nc->caps[NCSI_CAP_GENERIC].cap;
778 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
779 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
780 NCSI_CAP_GENERIC_HWA_SUPPORT) {
781 ndp->flags &= ~NCSI_DEV_HWA;
782 return false;
783 }
784 }
785 }
786
787 ndp->flags |= NCSI_DEV_HWA;
788 return true;
789 }
790
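/* With hardware arbitration every channel can be brought up, so all of
 * them are queued for configuration.
 */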
791 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
792 {
793 struct ncsi_package *np;
794 struct ncsi_channel *nc;
795 unsigned long flags;
796
797 /* Move all available channels to processing queue */
798 spin_lock_irqsave(&ndp->lock, flags);
799 NCSI_FOR_EACH_PACKAGE(ndp, np) {
800 NCSI_FOR_EACH_CHANNEL(np, nc) {
801 WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
802 !list_empty(&nc->link));
803 ncsi_stop_channel_monitor(nc);
804 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
805 }
806 }
807 spin_unlock_irqrestore(&ndp->lock, flags);
808
809 /* In the extreme case we might have no channels at all */
810 if (list_empty(&ndp->channel_queue)) {
811 ncsi_report_link(ndp, false);
812 return -ENOENT;
813 }
814
815 return ncsi_process_next_channel(ndp);
816 }
817
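/* Probe state machine. All possible packages are deselected and then
 * selected to discover which ones respond; each discovered package is
 * then walked in turn, querying version, capabilities and link status
 * for every channel it exposes.
 */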
818 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
819 {
820 struct ncsi_dev *nd = &ndp->ndev;
821 struct ncsi_package *np;
822 struct ncsi_channel *nc;
823 struct ncsi_cmd_arg nca;
824 unsigned char index;
825 int ret;
826
827 nca.ndp = ndp;
828 nca.driven = true;
829 switch (nd->state) {
830 case ncsi_dev_state_probe:
831 nd->state = ncsi_dev_state_probe_deselect;
832 /* Fall through */
833 case ncsi_dev_state_probe_deselect:
834 ndp->pending_req_num = 8;
835
836 /* Deselect all possible packages */
837 nca.type = NCSI_PKT_CMD_DP;
838 nca.channel = NCSI_RESERVED_CHANNEL;
839 for (index = 0; index < 8; index++) {
840 nca.package = index;
841 ret = ncsi_xmit_cmd(&nca);
842 if (ret)
843 goto error;
844 }
845
846 nd->state = ncsi_dev_state_probe_package;
847 break;
848 case ncsi_dev_state_probe_package:
849 ndp->pending_req_num = 16;
850
851 /* Select all possible packages */
852 nca.type = NCSI_PKT_CMD_SP;
853 nca.bytes[0] = 1;
854 nca.channel = NCSI_RESERVED_CHANNEL;
855 for (index = 0; index < 8; index++) {
856 nca.package = index;
857 ret = ncsi_xmit_cmd(&nca);
858 if (ret)
859 goto error;
860 }
861
862 /* Disable all possible packages */
863 nca.type = NCSI_PKT_CMD_DP;
864 for (index = 0; index < 8; index++) {
865 nca.package = index;
866 ret = ncsi_xmit_cmd(&nca);
867 if (ret)
868 goto error;
869 }
870
871 nd->state = ncsi_dev_state_probe_channel;
872 break;
873 case ncsi_dev_state_probe_channel:
874 if (!ndp->active_package)
875 ndp->active_package = list_first_or_null_rcu(
876 &ndp->packages, struct ncsi_package, node);
877 else if (list_is_last(&ndp->active_package->node,
878 &ndp->packages))
879 ndp->active_package = NULL;
880 else
881 ndp->active_package = list_next_entry(
882 ndp->active_package, node);
883
884 /* All available packages and channels have been enumerated. The
885 * enumeration happens only once, when the NCSI interface is
886 * started, so we need to continue bringing the interface up
887 * after the enumeration.
888 *
889 * We have to choose an active channel before configuring it.
890 * Note that in extreme situations we might not have an active
891 * channel at all.
892 */
893 if (!ndp->active_package) {
894 ndp->flags |= NCSI_DEV_PROBED;
895 if (ncsi_check_hwa(ndp))
896 ncsi_enable_hwa(ndp);
897 else
898 ncsi_choose_active_channel(ndp);
899 return;
900 }
901
902 /* Select the active package */
903 ndp->pending_req_num = 1;
904 nca.type = NCSI_PKT_CMD_SP;
905 nca.bytes[0] = 1;
906 nca.package = ndp->active_package->id;
907 nca.channel = NCSI_RESERVED_CHANNEL;
908 ret = ncsi_xmit_cmd(&nca);
909 if (ret)
910 goto error;
911
912 nd->state = ncsi_dev_state_probe_cis;
913 break;
914 case ncsi_dev_state_probe_cis:
915 ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
916
917 /* Clear initial state */
918 nca.type = NCSI_PKT_CMD_CIS;
919 nca.package = ndp->active_package->id;
920 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
921 nca.channel = index;
922 ret = ncsi_xmit_cmd(&nca);
923 if (ret)
924 goto error;
925 }
926
927 nd->state = ncsi_dev_state_probe_gvi;
928 break;
929 case ncsi_dev_state_probe_gvi:
930 case ncsi_dev_state_probe_gc:
931 case ncsi_dev_state_probe_gls:
932 np = ndp->active_package;
933 ndp->pending_req_num = np->channel_num;
934
935 /* Retrieve version, capability or link status */
936 if (nd->state == ncsi_dev_state_probe_gvi)
937 nca.type = NCSI_PKT_CMD_GVI;
938 else if (nd->state == ncsi_dev_state_probe_gc)
939 nca.type = NCSI_PKT_CMD_GC;
940 else
941 nca.type = NCSI_PKT_CMD_GLS;
942
943 nca.package = np->id;
944 NCSI_FOR_EACH_CHANNEL(np, nc) {
945 nca.channel = nc->id;
946 ret = ncsi_xmit_cmd(&nca);
947 if (ret)
948 goto error;
949 }
950
951 if (nd->state == ncsi_dev_state_probe_gvi)
952 nd->state = ncsi_dev_state_probe_gc;
953 else if (nd->state == ncsi_dev_state_probe_gc)
954 nd->state = ncsi_dev_state_probe_gls;
955 else
956 nd->state = ncsi_dev_state_probe_dp;
957 break;
958 case ncsi_dev_state_probe_dp:
959 ndp->pending_req_num = 1;
960
961 /* Deselect the active package */
962 nca.type = NCSI_PKT_CMD_DP;
963 nca.package = ndp->active_package->id;
964 nca.channel = NCSI_RESERVED_CHANNEL;
965 ret = ncsi_xmit_cmd(&nca);
966 if (ret)
967 goto error;
968
969 /* Scan channels in next package */
970 nd->state = ncsi_dev_state_probe_channel;
971 break;
972 default:
973 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
974 nd->state);
975 }
976
977 return;
978 error:
979 ncsi_report_link(ndp, true);
980 }
981
982 static void ncsi_dev_work(struct work_struct *work)
983 {
984 struct ncsi_dev_priv *ndp = container_of(work,
985 struct ncsi_dev_priv, work);
986 struct ncsi_dev *nd = &ndp->ndev;
987
988 switch (nd->state & ncsi_dev_state_major) {
989 case ncsi_dev_state_probe:
990 ncsi_probe_channel(ndp);
991 break;
992 case ncsi_dev_state_suspend:
993 ncsi_suspend_channel(ndp);
994 break;
995 case ncsi_dev_state_config:
996 ncsi_configure_channel(ndp);
997 break;
998 default:
999 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1000 nd->state);
1001 }
1002 }
1003
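/* Take the next channel off the processing queue and act on its
 * previous state: configure it if it was inactive, suspend it if it was
 * active. With an empty queue, either reshuffle the channel selection
 * or report the current link state.
 */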
1004 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1005 {
1006 struct ncsi_channel *nc;
1007 int old_state;
1008 unsigned long flags;
1009
1010 spin_lock_irqsave(&ndp->lock, flags);
1011 nc = list_first_or_null_rcu(&ndp->channel_queue,
1012 struct ncsi_channel, link);
1013 if (!nc) {
1014 spin_unlock_irqrestore(&ndp->lock, flags);
1015 goto out;
1016 }
1017
1018 list_del_init(&nc->link);
1019 spin_unlock_irqrestore(&ndp->lock, flags);
1020
1021 spin_lock_irqsave(&nc->lock, flags);
1022 old_state = nc->state;
1023 nc->state = NCSI_CHANNEL_INVISIBLE;
1024 spin_unlock_irqrestore(&nc->lock, flags);
1025
1026 ndp->active_channel = nc;
1027 ndp->active_package = nc->package;
1028
1029 switch (old_state) {
1030 case NCSI_CHANNEL_INACTIVE:
1031 ndp->ndev.state = ncsi_dev_state_config;
1032 ncsi_configure_channel(ndp);
1033 break;
1034 case NCSI_CHANNEL_ACTIVE:
1035 ndp->ndev.state = ncsi_dev_state_suspend;
1036 ncsi_suspend_channel(ndp);
1037 break;
1038 default:
1039 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1040 old_state, nc->package->id, nc->id);
1041 ncsi_report_link(ndp, false);
1042 return -EINVAL;
1043 }
1044
1045 return 0;
1046
1047 out:
1048 ndp->active_channel = NULL;
1049 ndp->active_package = NULL;
1050 if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1051 ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1052 return ncsi_choose_active_channel(ndp);
1053 }
1054
1055 ncsi_report_link(ndp, false);
1056 return -ENODEV;
1057 }
1058
1059 #if IS_ENABLED(CONFIG_IPV6)
1060 static int ncsi_inet6addr_event(struct notifier_block *this,
1061 unsigned long event, void *data)
1062 {
1063 struct inet6_ifaddr *ifa = data;
1064 struct net_device *dev = ifa->idev->dev;
1065 struct ncsi_dev *nd = ncsi_find_dev(dev);
1066 struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1067 struct ncsi_package *np;
1068 struct ncsi_channel *nc;
1069 struct ncsi_cmd_arg nca;
1070 bool action;
1071 int ret;
1072
1073 if (!ndp || (ipv6_addr_type(&ifa->addr) &
1074 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1075 return NOTIFY_OK;
1076
1077 switch (event) {
1078 case NETDEV_UP:
1079 action = (++ndp->inet6_addr_num) == 1;
1080 nca.type = NCSI_PKT_CMD_EGMF;
1081 break;
1082 case NETDEV_DOWN:
1083 action = (--ndp->inet6_addr_num == 0);
1084 nca.type = NCSI_PKT_CMD_DGMF;
1085 break;
1086 default:
1087 return NOTIFY_OK;
1088 }
1089
1090 /* We might not have an active channel or package yet. The
1091 * multicast filter required for IPv6 will be enabled when an
1092 * active channel or package is chosen.
1093 */
1094 np = ndp->active_package;
1095 nc = ndp->active_channel;
1096 if (!action || !np || !nc)
1097 return NOTIFY_OK;
1098
1099 /* We needn't enable or disable it if the function isn't supported */
1100 if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1101 return NOTIFY_OK;
1102
1103 nca.ndp = ndp;
1104 nca.driven = false;
1105 nca.package = np->id;
1106 nca.channel = nc->id;
1107 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1108 ret = ncsi_xmit_cmd(&nca);
1109 if (ret) {
1110 netdev_warn(dev, "Failed to %s global multicast filter (%d)\n",
1111 (event == NETDEV_UP) ? "enable" : "disable", ret);
1112 return NOTIFY_DONE;
1113 }
1114
1115 return NOTIFY_OK;
1116 }
1117
1118 static struct notifier_block ncsi_inet6addr_notifier = {
1119 .notifier_call = ncsi_inet6addr_event,
1120 };
1121 #endif /* CONFIG_IPV6 */
1122
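/* Register @dev as an NCSI-managed device: allocate the private state,
 * initialize the request table and timers, register the IPv6 address
 * notifier for the first device, and install the NCSI packet handler.
 */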
1123 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1124 void (*handler)(struct ncsi_dev *ndev))
1125 {
1126 struct ncsi_dev_priv *ndp;
1127 struct ncsi_dev *nd;
1128 unsigned long flags;
1129 int i;
1130
1131 /* Check if the device has been registered or not */
1132 nd = ncsi_find_dev(dev);
1133 if (nd)
1134 return nd;
1135
1136 /* Create NCSI device */
1137 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1138 if (!ndp)
1139 return NULL;
1140
1141 nd = &ndp->ndev;
1142 nd->state = ncsi_dev_state_registered;
1143 nd->dev = dev;
1144 nd->handler = handler;
1145 ndp->pending_req_num = 0;
1146 INIT_LIST_HEAD(&ndp->channel_queue);
1147 INIT_WORK(&ndp->work, ncsi_dev_work);
1148
1149 /* Initialize private NCSI device */
1150 spin_lock_init(&ndp->lock);
1151 INIT_LIST_HEAD(&ndp->packages);
1152 ndp->request_id = NCSI_REQ_START_IDX;
1153 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1154 ndp->requests[i].id = i;
1155 ndp->requests[i].ndp = ndp;
1156 setup_timer(&ndp->requests[i].timer,
1157 ncsi_request_timeout,
1158 (unsigned long)&ndp->requests[i]);
1159 }
1160
1161 spin_lock_irqsave(&ncsi_dev_lock, flags);
1162 #if IS_ENABLED(CONFIG_IPV6)
1163 ndp->inet6_addr_num = 0;
1164 if (list_empty(&ncsi_dev_list))
1165 register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1166 #endif
1167 list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1168 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1169
1170 /* Register NCSI packet Rx handler */
1171 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1172 ndp->ptype.func = ncsi_rcv_rsp;
1173 ndp->ptype.dev = dev;
1174 dev_add_pack(&ndp->ptype);
1175
1176 return nd;
1177 }
1178 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1179
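/* Start the NCSI device. The probe state machine runs on first use;
 * afterwards all channels are reset to inactive and either every
 * channel (with hardware arbitration) or a chosen channel is brought
 * up again.
 */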
1180 int ncsi_start_dev(struct ncsi_dev *nd)
1181 {
1182 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1183 struct ncsi_package *np;
1184 struct ncsi_channel *nc;
1185 unsigned long flags;
1186 bool chained;
1187 int old_state, ret;
1188
1189 if (nd->state != ncsi_dev_state_registered &&
1190 nd->state != ncsi_dev_state_functional)
1191 return -ENOTTY;
1192
1193 if (!(ndp->flags & NCSI_DEV_PROBED)) {
1194 nd->state = ncsi_dev_state_probe;
1195 schedule_work(&ndp->work);
1196 return 0;
1197 }
1198
1199 /* Reset channel's state and start over */
1200 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1201 NCSI_FOR_EACH_CHANNEL(np, nc) {
1202 spin_lock_irqsave(&nc->lock, flags);
1203 chained = !list_empty(&nc->link);
1204 old_state = nc->state;
1205 nc->state = NCSI_CHANNEL_INACTIVE;
1206 spin_unlock_irqrestore(&nc->lock, flags);
1207
1208 WARN_ON_ONCE(chained ||
1209 old_state == NCSI_CHANNEL_INVISIBLE);
1210 }
1211 }
1212
1213 if (ndp->flags & NCSI_DEV_HWA)
1214 ret = ncsi_enable_hwa(ndp);
1215 else
1216 ret = ncsi_choose_active_channel(ndp);
1217
1218 return ret;
1219 }
1220 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1221
1222 void ncsi_unregister_dev(struct ncsi_dev *nd)
1223 {
1224 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1225 struct ncsi_package *np, *tmp;
1226 unsigned long flags;
1227
1228 dev_remove_pack(&ndp->ptype);
1229
1230 list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1231 ncsi_remove_package(np);
1232
1233 spin_lock_irqsave(&ncsi_dev_lock, flags);
1234 list_del_rcu(&ndp->node);
1235 #if IS_ENABLED(CONFIG_IPV6)
1236 if (list_empty(&ncsi_dev_list))
1237 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1238 #endif
1239 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1240
1241 kfree(ndp);
1242 }
1243 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);