/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
18 #include <net/net_namespace.h>
20 #include <net/addrconf.h>
22 #include <net/if_inet6.h>
/* All registered NCSI devices in the system; additions/removals are
 * serialized by ncsi_dev_lock.
 */
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);
30 static inline int ncsi_filter_size(int table
)
32 int sizes
[] = { 2, 6, 6, 6 };
34 BUILD_BUG_ON(ARRAY_SIZE(sizes
) != NCSI_FILTER_MAX
);
35 if (table
< NCSI_FILTER_BASE
|| table
>= NCSI_FILTER_MAX
)
41 int ncsi_find_filter(struct ncsi_channel
*nc
, int table
, void *data
)
43 struct ncsi_channel_filter
*ncf
;
48 ncf
= nc
->filters
[table
];
52 size
= ncsi_filter_size(table
);
56 spin_lock_irqsave(&nc
->lock
, flags
);
57 bitmap
= (void *)&ncf
->bitmap
;
59 while ((index
= find_next_bit(bitmap
, ncf
->total
, index
+ 1))
61 if (!memcmp(ncf
->data
+ size
* index
, data
, size
)) {
62 spin_unlock_irqrestore(&nc
->lock
, flags
);
66 spin_unlock_irqrestore(&nc
->lock
, flags
);
71 int ncsi_add_filter(struct ncsi_channel
*nc
, int table
, void *data
)
73 struct ncsi_channel_filter
*ncf
;
78 size
= ncsi_filter_size(table
);
82 index
= ncsi_find_filter(nc
, table
, data
);
86 ncf
= nc
->filters
[table
];
90 spin_lock_irqsave(&nc
->lock
, flags
);
91 bitmap
= (void *)&ncf
->bitmap
;
93 index
= find_next_zero_bit(bitmap
, ncf
->total
, 0);
94 if (index
>= ncf
->total
) {
95 spin_unlock_irqrestore(&nc
->lock
, flags
);
98 } while (test_and_set_bit(index
, bitmap
));
100 memcpy(ncf
->data
+ size
* index
, data
, size
);
101 spin_unlock_irqrestore(&nc
->lock
, flags
);
106 int ncsi_remove_filter(struct ncsi_channel
*nc
, int table
, int index
)
108 struct ncsi_channel_filter
*ncf
;
113 size
= ncsi_filter_size(table
);
117 ncf
= nc
->filters
[table
];
118 if (!ncf
|| index
>= ncf
->total
)
121 spin_lock_irqsave(&nc
->lock
, flags
);
122 bitmap
= (void *)&ncf
->bitmap
;
123 if (test_and_clear_bit(index
, bitmap
))
124 memset(ncf
->data
+ size
* index
, 0, size
);
125 spin_unlock_irqrestore(&nc
->lock
, flags
);
130 static void ncsi_report_link(struct ncsi_dev_priv
*ndp
, bool force_down
)
132 struct ncsi_dev
*nd
= &ndp
->ndev
;
133 struct ncsi_package
*np
;
134 struct ncsi_channel
*nc
;
137 nd
->state
= ncsi_dev_state_functional
;
144 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
145 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
146 spin_lock_irqsave(&nc
->lock
, flags
);
148 if (!list_empty(&nc
->link
) ||
149 nc
->state
!= NCSI_CHANNEL_ACTIVE
) {
150 spin_unlock_irqrestore(&nc
->lock
, flags
);
154 if (nc
->modes
[NCSI_MODE_LINK
].data
[2] & 0x1) {
155 spin_unlock_irqrestore(&nc
->lock
, flags
);
160 spin_unlock_irqrestore(&nc
->lock
, flags
);
168 static void ncsi_channel_monitor(unsigned long data
)
170 struct ncsi_channel
*nc
= (struct ncsi_channel
*)data
;
171 struct ncsi_package
*np
= nc
->package
;
172 struct ncsi_dev_priv
*ndp
= np
->ndp
;
173 struct ncsi_cmd_arg nca
;
174 bool enabled
, chained
;
175 unsigned int monitor_state
;
179 spin_lock_irqsave(&nc
->lock
, flags
);
181 chained
= !list_empty(&nc
->link
);
182 enabled
= nc
->monitor
.enabled
;
183 monitor_state
= nc
->monitor
.state
;
184 spin_unlock_irqrestore(&nc
->lock
, flags
);
186 if (!enabled
|| chained
)
188 if (state
!= NCSI_CHANNEL_INACTIVE
&&
189 state
!= NCSI_CHANNEL_ACTIVE
)
192 switch (monitor_state
) {
193 case NCSI_CHANNEL_MONITOR_START
:
194 case NCSI_CHANNEL_MONITOR_RETRY
:
196 nca
.package
= np
->id
;
197 nca
.channel
= nc
->id
;
198 nca
.type
= NCSI_PKT_CMD_GLS
;
200 ret
= ncsi_xmit_cmd(&nca
);
202 netdev_err(ndp
->ndev
.dev
, "Error %d sending GLS\n",
208 case NCSI_CHANNEL_MONITOR_WAIT
... NCSI_CHANNEL_MONITOR_WAIT_MAX
:
211 if (!(ndp
->flags
& NCSI_DEV_HWA
) &&
212 state
== NCSI_CHANNEL_ACTIVE
) {
213 ncsi_report_link(ndp
, true);
214 ndp
->flags
|= NCSI_DEV_RESHUFFLE
;
217 spin_lock_irqsave(&nc
->lock
, flags
);
218 nc
->state
= NCSI_CHANNEL_INVISIBLE
;
219 spin_unlock_irqrestore(&nc
->lock
, flags
);
221 spin_lock_irqsave(&ndp
->lock
, flags
);
222 nc
->state
= NCSI_CHANNEL_INACTIVE
;
223 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
224 spin_unlock_irqrestore(&ndp
->lock
, flags
);
225 ncsi_process_next_channel(ndp
);
229 spin_lock_irqsave(&nc
->lock
, flags
);
231 spin_unlock_irqrestore(&nc
->lock
, flags
);
232 mod_timer(&nc
->monitor
.timer
, jiffies
+ HZ
);
235 void ncsi_start_channel_monitor(struct ncsi_channel
*nc
)
239 spin_lock_irqsave(&nc
->lock
, flags
);
240 WARN_ON_ONCE(nc
->monitor
.enabled
);
241 nc
->monitor
.enabled
= true;
242 nc
->monitor
.state
= NCSI_CHANNEL_MONITOR_START
;
243 spin_unlock_irqrestore(&nc
->lock
, flags
);
245 mod_timer(&nc
->monitor
.timer
, jiffies
+ HZ
);
248 void ncsi_stop_channel_monitor(struct ncsi_channel
*nc
)
252 spin_lock_irqsave(&nc
->lock
, flags
);
253 if (!nc
->monitor
.enabled
) {
254 spin_unlock_irqrestore(&nc
->lock
, flags
);
257 nc
->monitor
.enabled
= false;
258 spin_unlock_irqrestore(&nc
->lock
, flags
);
260 del_timer_sync(&nc
->monitor
.timer
);
263 struct ncsi_channel
*ncsi_find_channel(struct ncsi_package
*np
,
266 struct ncsi_channel
*nc
;
268 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
276 struct ncsi_channel
*ncsi_add_channel(struct ncsi_package
*np
, unsigned char id
)
278 struct ncsi_channel
*nc
, *tmp
;
282 nc
= kzalloc(sizeof(*nc
), GFP_ATOMIC
);
288 nc
->state
= NCSI_CHANNEL_INACTIVE
;
289 nc
->monitor
.enabled
= false;
290 setup_timer(&nc
->monitor
.timer
,
291 ncsi_channel_monitor
, (unsigned long)nc
);
292 spin_lock_init(&nc
->lock
);
293 INIT_LIST_HEAD(&nc
->link
);
294 for (index
= 0; index
< NCSI_CAP_MAX
; index
++)
295 nc
->caps
[index
].index
= index
;
296 for (index
= 0; index
< NCSI_MODE_MAX
; index
++)
297 nc
->modes
[index
].index
= index
;
299 spin_lock_irqsave(&np
->lock
, flags
);
300 tmp
= ncsi_find_channel(np
, id
);
302 spin_unlock_irqrestore(&np
->lock
, flags
);
307 list_add_tail_rcu(&nc
->node
, &np
->channels
);
309 spin_unlock_irqrestore(&np
->lock
, flags
);
314 static void ncsi_remove_channel(struct ncsi_channel
*nc
)
316 struct ncsi_package
*np
= nc
->package
;
317 struct ncsi_channel_filter
*ncf
;
321 /* Release filters */
322 spin_lock_irqsave(&nc
->lock
, flags
);
323 for (i
= 0; i
< NCSI_FILTER_MAX
; i
++) {
324 ncf
= nc
->filters
[i
];
328 nc
->filters
[i
] = NULL
;
332 nc
->state
= NCSI_CHANNEL_INACTIVE
;
333 spin_unlock_irqrestore(&nc
->lock
, flags
);
334 ncsi_stop_channel_monitor(nc
);
336 /* Remove and free channel */
337 spin_lock_irqsave(&np
->lock
, flags
);
338 list_del_rcu(&nc
->node
);
340 spin_unlock_irqrestore(&np
->lock
, flags
);
345 struct ncsi_package
*ncsi_find_package(struct ncsi_dev_priv
*ndp
,
348 struct ncsi_package
*np
;
350 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
358 struct ncsi_package
*ncsi_add_package(struct ncsi_dev_priv
*ndp
,
361 struct ncsi_package
*np
, *tmp
;
364 np
= kzalloc(sizeof(*np
), GFP_ATOMIC
);
370 spin_lock_init(&np
->lock
);
371 INIT_LIST_HEAD(&np
->channels
);
373 spin_lock_irqsave(&ndp
->lock
, flags
);
374 tmp
= ncsi_find_package(ndp
, id
);
376 spin_unlock_irqrestore(&ndp
->lock
, flags
);
381 list_add_tail_rcu(&np
->node
, &ndp
->packages
);
383 spin_unlock_irqrestore(&ndp
->lock
, flags
);
388 void ncsi_remove_package(struct ncsi_package
*np
)
390 struct ncsi_dev_priv
*ndp
= np
->ndp
;
391 struct ncsi_channel
*nc
, *tmp
;
394 /* Release all child channels */
395 list_for_each_entry_safe(nc
, tmp
, &np
->channels
, node
)
396 ncsi_remove_channel(nc
);
398 /* Remove and free package */
399 spin_lock_irqsave(&ndp
->lock
, flags
);
400 list_del_rcu(&np
->node
);
402 spin_unlock_irqrestore(&ndp
->lock
, flags
);
407 void ncsi_find_package_and_channel(struct ncsi_dev_priv
*ndp
,
409 struct ncsi_package
**np
,
410 struct ncsi_channel
**nc
)
412 struct ncsi_package
*p
;
413 struct ncsi_channel
*c
;
415 p
= ncsi_find_package(ndp
, NCSI_PACKAGE_INDEX(id
));
416 c
= p
? ncsi_find_channel(p
, NCSI_CHANNEL_INDEX(id
)) : NULL
;
424 /* For two consecutive NCSI commands, the packet IDs shouldn't
425 * be same. Otherwise, the bogus response might be replied. So
426 * the available IDs are allocated in round-robin fashion.
428 struct ncsi_request
*ncsi_alloc_request(struct ncsi_dev_priv
*ndp
,
429 unsigned int req_flags
)
431 struct ncsi_request
*nr
= NULL
;
432 int i
, limit
= ARRAY_SIZE(ndp
->requests
);
435 /* Check if there is one available request until the ceiling */
436 spin_lock_irqsave(&ndp
->lock
, flags
);
437 for (i
= ndp
->request_id
; i
< limit
; i
++) {
438 if (ndp
->requests
[i
].used
)
441 nr
= &ndp
->requests
[i
];
443 nr
->flags
= req_flags
;
444 ndp
->request_id
= i
+ 1;
448 /* Fail back to check from the starting cursor */
449 for (i
= NCSI_REQ_START_IDX
; i
< ndp
->request_id
; i
++) {
450 if (ndp
->requests
[i
].used
)
453 nr
= &ndp
->requests
[i
];
455 nr
->flags
= req_flags
;
456 ndp
->request_id
= i
+ 1;
461 spin_unlock_irqrestore(&ndp
->lock
, flags
);
465 void ncsi_free_request(struct ncsi_request
*nr
)
467 struct ncsi_dev_priv
*ndp
= nr
->ndp
;
468 struct sk_buff
*cmd
, *rsp
;
474 del_timer_sync(&nr
->timer
);
477 spin_lock_irqsave(&ndp
->lock
, flags
);
483 driven
= !!(nr
->flags
& NCSI_REQ_FLAG_EVENT_DRIVEN
);
484 spin_unlock_irqrestore(&ndp
->lock
, flags
);
486 if (driven
&& cmd
&& --ndp
->pending_req_num
== 0)
487 schedule_work(&ndp
->work
);
489 /* Release command and response */
494 struct ncsi_dev
*ncsi_find_dev(struct net_device
*dev
)
496 struct ncsi_dev_priv
*ndp
;
498 NCSI_FOR_EACH_DEV(ndp
) {
499 if (ndp
->ndev
.dev
== dev
)
506 static void ncsi_request_timeout(unsigned long data
)
508 struct ncsi_request
*nr
= (struct ncsi_request
*)data
;
509 struct ncsi_dev_priv
*ndp
= nr
->ndp
;
512 /* If the request already had associated response,
513 * let the response handler to release it.
515 spin_lock_irqsave(&ndp
->lock
, flags
);
517 if (nr
->rsp
|| !nr
->cmd
) {
518 spin_unlock_irqrestore(&ndp
->lock
, flags
);
521 spin_unlock_irqrestore(&ndp
->lock
, flags
);
523 /* Release the request */
524 ncsi_free_request(nr
);
527 static void ncsi_suspend_channel(struct ncsi_dev_priv
*ndp
)
529 struct ncsi_dev
*nd
= &ndp
->ndev
;
530 struct ncsi_package
*np
= ndp
->active_package
;
531 struct ncsi_channel
*nc
= ndp
->active_channel
;
532 struct ncsi_cmd_arg nca
;
537 nca
.req_flags
= NCSI_REQ_FLAG_EVENT_DRIVEN
;
539 case ncsi_dev_state_suspend
:
540 nd
->state
= ncsi_dev_state_suspend_select
;
542 case ncsi_dev_state_suspend_select
:
543 case ncsi_dev_state_suspend_dcnt
:
544 case ncsi_dev_state_suspend_dc
:
545 case ncsi_dev_state_suspend_deselect
:
546 ndp
->pending_req_num
= 1;
548 np
= ndp
->active_package
;
549 nc
= ndp
->active_channel
;
550 nca
.package
= np
->id
;
551 if (nd
->state
== ncsi_dev_state_suspend_select
) {
552 nca
.type
= NCSI_PKT_CMD_SP
;
553 nca
.channel
= NCSI_RESERVED_CHANNEL
;
554 if (ndp
->flags
& NCSI_DEV_HWA
)
558 nd
->state
= ncsi_dev_state_suspend_dcnt
;
559 } else if (nd
->state
== ncsi_dev_state_suspend_dcnt
) {
560 nca
.type
= NCSI_PKT_CMD_DCNT
;
561 nca
.channel
= nc
->id
;
562 nd
->state
= ncsi_dev_state_suspend_dc
;
563 } else if (nd
->state
== ncsi_dev_state_suspend_dc
) {
564 nca
.type
= NCSI_PKT_CMD_DC
;
565 nca
.channel
= nc
->id
;
567 nd
->state
= ncsi_dev_state_suspend_deselect
;
568 } else if (nd
->state
== ncsi_dev_state_suspend_deselect
) {
569 nca
.type
= NCSI_PKT_CMD_DP
;
570 nca
.channel
= NCSI_RESERVED_CHANNEL
;
571 nd
->state
= ncsi_dev_state_suspend_done
;
574 ret
= ncsi_xmit_cmd(&nca
);
576 nd
->state
= ncsi_dev_state_functional
;
581 case ncsi_dev_state_suspend_done
:
582 spin_lock_irqsave(&nc
->lock
, flags
);
583 nc
->state
= NCSI_CHANNEL_INACTIVE
;
584 spin_unlock_irqrestore(&nc
->lock
, flags
);
585 ncsi_process_next_channel(ndp
);
589 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%x in suspend\n",
594 static void ncsi_configure_channel(struct ncsi_dev_priv
*ndp
)
596 struct ncsi_dev
*nd
= &ndp
->ndev
;
597 struct net_device
*dev
= nd
->dev
;
598 struct ncsi_package
*np
= ndp
->active_package
;
599 struct ncsi_channel
*nc
= ndp
->active_channel
;
600 struct ncsi_cmd_arg nca
;
606 nca
.req_flags
= NCSI_REQ_FLAG_EVENT_DRIVEN
;
608 case ncsi_dev_state_config
:
609 case ncsi_dev_state_config_sp
:
610 ndp
->pending_req_num
= 1;
612 /* Select the specific package */
613 nca
.type
= NCSI_PKT_CMD_SP
;
614 if (ndp
->flags
& NCSI_DEV_HWA
)
618 nca
.package
= np
->id
;
619 nca
.channel
= NCSI_RESERVED_CHANNEL
;
620 ret
= ncsi_xmit_cmd(&nca
);
624 nd
->state
= ncsi_dev_state_config_cis
;
626 case ncsi_dev_state_config_cis
:
627 ndp
->pending_req_num
= 1;
629 /* Clear initial state */
630 nca
.type
= NCSI_PKT_CMD_CIS
;
631 nca
.package
= np
->id
;
632 nca
.channel
= nc
->id
;
633 ret
= ncsi_xmit_cmd(&nca
);
637 nd
->state
= ncsi_dev_state_config_sma
;
639 case ncsi_dev_state_config_sma
:
640 case ncsi_dev_state_config_ebf
:
641 #if IS_ENABLED(CONFIG_IPV6)
642 case ncsi_dev_state_config_egmf
:
644 case ncsi_dev_state_config_ecnt
:
645 case ncsi_dev_state_config_ec
:
646 case ncsi_dev_state_config_ae
:
647 case ncsi_dev_state_config_gls
:
648 ndp
->pending_req_num
= 1;
650 nca
.package
= np
->id
;
651 nca
.channel
= nc
->id
;
653 /* Use first entry in unicast filter table. Note that
654 * the MAC filter table starts from entry 1 instead of
657 if (nd
->state
== ncsi_dev_state_config_sma
) {
658 nca
.type
= NCSI_PKT_CMD_SMA
;
659 for (index
= 0; index
< 6; index
++)
660 nca
.bytes
[index
] = dev
->dev_addr
[index
];
663 nd
->state
= ncsi_dev_state_config_ebf
;
664 } else if (nd
->state
== ncsi_dev_state_config_ebf
) {
665 nca
.type
= NCSI_PKT_CMD_EBF
;
666 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_BC
].cap
;
667 nd
->state
= ncsi_dev_state_config_ecnt
;
668 #if IS_ENABLED(CONFIG_IPV6)
669 if (ndp
->inet6_addr_num
> 0 &&
670 (nc
->caps
[NCSI_CAP_GENERIC
].cap
&
671 NCSI_CAP_GENERIC_MC
))
672 nd
->state
= ncsi_dev_state_config_egmf
;
674 nd
->state
= ncsi_dev_state_config_ecnt
;
675 } else if (nd
->state
== ncsi_dev_state_config_egmf
) {
676 nca
.type
= NCSI_PKT_CMD_EGMF
;
677 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_MC
].cap
;
678 nd
->state
= ncsi_dev_state_config_ecnt
;
679 #endif /* CONFIG_IPV6 */
680 } else if (nd
->state
== ncsi_dev_state_config_ecnt
) {
681 nca
.type
= NCSI_PKT_CMD_ECNT
;
682 nd
->state
= ncsi_dev_state_config_ec
;
683 } else if (nd
->state
== ncsi_dev_state_config_ec
) {
684 /* Enable AEN if it's supported */
685 nca
.type
= NCSI_PKT_CMD_EC
;
686 nd
->state
= ncsi_dev_state_config_ae
;
687 if (!(nc
->caps
[NCSI_CAP_AEN
].cap
& NCSI_CAP_AEN_MASK
))
688 nd
->state
= ncsi_dev_state_config_gls
;
689 } else if (nd
->state
== ncsi_dev_state_config_ae
) {
690 nca
.type
= NCSI_PKT_CMD_AE
;
692 nca
.dwords
[1] = nc
->caps
[NCSI_CAP_AEN
].cap
;
693 nd
->state
= ncsi_dev_state_config_gls
;
694 } else if (nd
->state
== ncsi_dev_state_config_gls
) {
695 nca
.type
= NCSI_PKT_CMD_GLS
;
696 nd
->state
= ncsi_dev_state_config_done
;
699 ret
= ncsi_xmit_cmd(&nca
);
703 case ncsi_dev_state_config_done
:
704 spin_lock_irqsave(&nc
->lock
, flags
);
705 if (nc
->modes
[NCSI_MODE_LINK
].data
[2] & 0x1)
706 nc
->state
= NCSI_CHANNEL_ACTIVE
;
708 nc
->state
= NCSI_CHANNEL_INACTIVE
;
709 spin_unlock_irqrestore(&nc
->lock
, flags
);
711 ncsi_start_channel_monitor(nc
);
712 ncsi_process_next_channel(ndp
);
715 netdev_warn(dev
, "Wrong NCSI state 0x%x in config\n",
722 ncsi_report_link(ndp
, true);
725 static int ncsi_choose_active_channel(struct ncsi_dev_priv
*ndp
)
727 struct ncsi_package
*np
;
728 struct ncsi_channel
*nc
, *found
;
729 struct ncsi_channel_mode
*ncm
;
732 /* The search is done once an inactive channel with up
736 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
737 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
738 spin_lock_irqsave(&nc
->lock
, flags
);
740 if (!list_empty(&nc
->link
) ||
741 nc
->state
!= NCSI_CHANNEL_INACTIVE
) {
742 spin_unlock_irqrestore(&nc
->lock
, flags
);
749 ncm
= &nc
->modes
[NCSI_MODE_LINK
];
750 if (ncm
->data
[2] & 0x1) {
751 spin_unlock_irqrestore(&nc
->lock
, flags
);
756 spin_unlock_irqrestore(&nc
->lock
, flags
);
761 ncsi_report_link(ndp
, true);
766 spin_lock_irqsave(&ndp
->lock
, flags
);
767 list_add_tail_rcu(&found
->link
, &ndp
->channel_queue
);
768 spin_unlock_irqrestore(&ndp
->lock
, flags
);
770 return ncsi_process_next_channel(ndp
);
773 static bool ncsi_check_hwa(struct ncsi_dev_priv
*ndp
)
775 struct ncsi_package
*np
;
776 struct ncsi_channel
*nc
;
779 /* The hardware arbitration is disabled if any one channel
780 * doesn't support explicitly.
782 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
783 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
784 cap
= nc
->caps
[NCSI_CAP_GENERIC
].cap
;
785 if (!(cap
& NCSI_CAP_GENERIC_HWA
) ||
786 (cap
& NCSI_CAP_GENERIC_HWA_MASK
) !=
787 NCSI_CAP_GENERIC_HWA_SUPPORT
) {
788 ndp
->flags
&= ~NCSI_DEV_HWA
;
794 ndp
->flags
|= NCSI_DEV_HWA
;
798 static int ncsi_enable_hwa(struct ncsi_dev_priv
*ndp
)
800 struct ncsi_package
*np
;
801 struct ncsi_channel
*nc
;
804 /* Move all available channels to processing queue */
805 spin_lock_irqsave(&ndp
->lock
, flags
);
806 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
807 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
808 WARN_ON_ONCE(nc
->state
!= NCSI_CHANNEL_INACTIVE
||
809 !list_empty(&nc
->link
));
810 ncsi_stop_channel_monitor(nc
);
811 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
814 spin_unlock_irqrestore(&ndp
->lock
, flags
);
816 /* We can have no channels in extremely case */
817 if (list_empty(&ndp
->channel_queue
)) {
818 ncsi_report_link(ndp
, false);
822 return ncsi_process_next_channel(ndp
);
825 static void ncsi_probe_channel(struct ncsi_dev_priv
*ndp
)
827 struct ncsi_dev
*nd
= &ndp
->ndev
;
828 struct ncsi_package
*np
;
829 struct ncsi_channel
*nc
;
830 struct ncsi_cmd_arg nca
;
835 nca
.req_flags
= NCSI_REQ_FLAG_EVENT_DRIVEN
;
837 case ncsi_dev_state_probe
:
838 nd
->state
= ncsi_dev_state_probe_deselect
;
840 case ncsi_dev_state_probe_deselect
:
841 ndp
->pending_req_num
= 8;
843 /* Deselect all possible packages */
844 nca
.type
= NCSI_PKT_CMD_DP
;
845 nca
.channel
= NCSI_RESERVED_CHANNEL
;
846 for (index
= 0; index
< 8; index
++) {
848 ret
= ncsi_xmit_cmd(&nca
);
853 nd
->state
= ncsi_dev_state_probe_package
;
855 case ncsi_dev_state_probe_package
:
856 ndp
->pending_req_num
= 16;
858 /* Select all possible packages */
859 nca
.type
= NCSI_PKT_CMD_SP
;
861 nca
.channel
= NCSI_RESERVED_CHANNEL
;
862 for (index
= 0; index
< 8; index
++) {
864 ret
= ncsi_xmit_cmd(&nca
);
869 /* Disable all possible packages */
870 nca
.type
= NCSI_PKT_CMD_DP
;
871 for (index
= 0; index
< 8; index
++) {
873 ret
= ncsi_xmit_cmd(&nca
);
878 nd
->state
= ncsi_dev_state_probe_channel
;
880 case ncsi_dev_state_probe_channel
:
881 if (!ndp
->active_package
)
882 ndp
->active_package
= list_first_or_null_rcu(
883 &ndp
->packages
, struct ncsi_package
, node
);
884 else if (list_is_last(&ndp
->active_package
->node
,
886 ndp
->active_package
= NULL
;
888 ndp
->active_package
= list_next_entry(
889 ndp
->active_package
, node
);
891 /* All available packages and channels are enumerated. The
892 * enumeration happens for once when the NCSI interface is
893 * started. So we need continue to start the interface after
896 * We have to choose an active channel before configuring it.
897 * Note that we possibly don't have active channel in extreme
900 if (!ndp
->active_package
) {
901 ndp
->flags
|= NCSI_DEV_PROBED
;
902 if (ncsi_check_hwa(ndp
))
903 ncsi_enable_hwa(ndp
);
905 ncsi_choose_active_channel(ndp
);
909 /* Select the active package */
910 ndp
->pending_req_num
= 1;
911 nca
.type
= NCSI_PKT_CMD_SP
;
913 nca
.package
= ndp
->active_package
->id
;
914 nca
.channel
= NCSI_RESERVED_CHANNEL
;
915 ret
= ncsi_xmit_cmd(&nca
);
919 nd
->state
= ncsi_dev_state_probe_cis
;
921 case ncsi_dev_state_probe_cis
:
922 ndp
->pending_req_num
= NCSI_RESERVED_CHANNEL
;
924 /* Clear initial state */
925 nca
.type
= NCSI_PKT_CMD_CIS
;
926 nca
.package
= ndp
->active_package
->id
;
927 for (index
= 0; index
< NCSI_RESERVED_CHANNEL
; index
++) {
929 ret
= ncsi_xmit_cmd(&nca
);
934 nd
->state
= ncsi_dev_state_probe_gvi
;
936 case ncsi_dev_state_probe_gvi
:
937 case ncsi_dev_state_probe_gc
:
938 case ncsi_dev_state_probe_gls
:
939 np
= ndp
->active_package
;
940 ndp
->pending_req_num
= np
->channel_num
;
942 /* Retrieve version, capability or link status */
943 if (nd
->state
== ncsi_dev_state_probe_gvi
)
944 nca
.type
= NCSI_PKT_CMD_GVI
;
945 else if (nd
->state
== ncsi_dev_state_probe_gc
)
946 nca
.type
= NCSI_PKT_CMD_GC
;
948 nca
.type
= NCSI_PKT_CMD_GLS
;
950 nca
.package
= np
->id
;
951 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
952 nca
.channel
= nc
->id
;
953 ret
= ncsi_xmit_cmd(&nca
);
958 if (nd
->state
== ncsi_dev_state_probe_gvi
)
959 nd
->state
= ncsi_dev_state_probe_gc
;
960 else if (nd
->state
== ncsi_dev_state_probe_gc
)
961 nd
->state
= ncsi_dev_state_probe_gls
;
963 nd
->state
= ncsi_dev_state_probe_dp
;
965 case ncsi_dev_state_probe_dp
:
966 ndp
->pending_req_num
= 1;
968 /* Deselect the active package */
969 nca
.type
= NCSI_PKT_CMD_DP
;
970 nca
.package
= ndp
->active_package
->id
;
971 nca
.channel
= NCSI_RESERVED_CHANNEL
;
972 ret
= ncsi_xmit_cmd(&nca
);
976 /* Scan channels in next package */
977 nd
->state
= ncsi_dev_state_probe_channel
;
980 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%0x in enumeration\n",
986 ncsi_report_link(ndp
, true);
989 static void ncsi_dev_work(struct work_struct
*work
)
991 struct ncsi_dev_priv
*ndp
= container_of(work
,
992 struct ncsi_dev_priv
, work
);
993 struct ncsi_dev
*nd
= &ndp
->ndev
;
995 switch (nd
->state
& ncsi_dev_state_major
) {
996 case ncsi_dev_state_probe
:
997 ncsi_probe_channel(ndp
);
999 case ncsi_dev_state_suspend
:
1000 ncsi_suspend_channel(ndp
);
1002 case ncsi_dev_state_config
:
1003 ncsi_configure_channel(ndp
);
1006 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%x in workqueue\n",
1011 int ncsi_process_next_channel(struct ncsi_dev_priv
*ndp
)
1013 struct ncsi_channel
*nc
;
1015 unsigned long flags
;
1017 spin_lock_irqsave(&ndp
->lock
, flags
);
1018 nc
= list_first_or_null_rcu(&ndp
->channel_queue
,
1019 struct ncsi_channel
, link
);
1021 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1025 list_del_init(&nc
->link
);
1026 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1028 spin_lock_irqsave(&nc
->lock
, flags
);
1029 old_state
= nc
->state
;
1030 nc
->state
= NCSI_CHANNEL_INVISIBLE
;
1031 spin_unlock_irqrestore(&nc
->lock
, flags
);
1033 ndp
->active_channel
= nc
;
1034 ndp
->active_package
= nc
->package
;
1036 switch (old_state
) {
1037 case NCSI_CHANNEL_INACTIVE
:
1038 ndp
->ndev
.state
= ncsi_dev_state_config
;
1039 ncsi_configure_channel(ndp
);
1041 case NCSI_CHANNEL_ACTIVE
:
1042 ndp
->ndev
.state
= ncsi_dev_state_suspend
;
1043 ncsi_suspend_channel(ndp
);
1046 netdev_err(ndp
->ndev
.dev
, "Invalid state 0x%x on %d:%d\n",
1047 old_state
, nc
->package
->id
, nc
->id
);
1048 ncsi_report_link(ndp
, false);
1055 ndp
->active_channel
= NULL
;
1056 ndp
->active_package
= NULL
;
1057 if (ndp
->flags
& NCSI_DEV_RESHUFFLE
) {
1058 ndp
->flags
&= ~NCSI_DEV_RESHUFFLE
;
1059 return ncsi_choose_active_channel(ndp
);
1062 ncsi_report_link(ndp
, false);
1066 #if IS_ENABLED(CONFIG_IPV6)
1067 static int ncsi_inet6addr_event(struct notifier_block
*this,
1068 unsigned long event
, void *data
)
1070 struct inet6_ifaddr
*ifa
= data
;
1071 struct net_device
*dev
= ifa
->idev
->dev
;
1072 struct ncsi_dev
*nd
= ncsi_find_dev(dev
);
1073 struct ncsi_dev_priv
*ndp
= nd
? TO_NCSI_DEV_PRIV(nd
) : NULL
;
1074 struct ncsi_package
*np
;
1075 struct ncsi_channel
*nc
;
1076 struct ncsi_cmd_arg nca
;
1080 if (!ndp
|| (ipv6_addr_type(&ifa
->addr
) &
1081 (IPV6_ADDR_LINKLOCAL
| IPV6_ADDR_LOOPBACK
)))
1086 action
= (++ndp
->inet6_addr_num
) == 1;
1087 nca
.type
= NCSI_PKT_CMD_EGMF
;
1090 action
= (--ndp
->inet6_addr_num
== 0);
1091 nca
.type
= NCSI_PKT_CMD_DGMF
;
1097 /* We might not have active channel or packages. The IPv6
1098 * required multicast will be enabled when active channel
1099 * or packages are chosen.
1101 np
= ndp
->active_package
;
1102 nc
= ndp
->active_channel
;
1103 if (!action
|| !np
|| !nc
)
1106 /* We needn't enable or disable it if the function isn't supported */
1107 if (!(nc
->caps
[NCSI_CAP_GENERIC
].cap
& NCSI_CAP_GENERIC_MC
))
1112 nca
.package
= np
->id
;
1113 nca
.channel
= nc
->id
;
1114 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_MC
].cap
;
1115 ret
= ncsi_xmit_cmd(&nca
);
1117 netdev_warn(dev
, "Fail to %s global multicast filter (%d)\n",
1118 (event
== NETDEV_UP
) ? "enable" : "disable", ret
);
1125 static struct notifier_block ncsi_inet6addr_notifier
= {
1126 .notifier_call
= ncsi_inet6addr_event
,
1128 #endif /* CONFIG_IPV6 */
1130 struct ncsi_dev
*ncsi_register_dev(struct net_device
*dev
,
1131 void (*handler
)(struct ncsi_dev
*ndev
))
1133 struct ncsi_dev_priv
*ndp
;
1134 struct ncsi_dev
*nd
;
1135 unsigned long flags
;
1138 /* Check if the device has been registered or not */
1139 nd
= ncsi_find_dev(dev
);
1143 /* Create NCSI device */
1144 ndp
= kzalloc(sizeof(*ndp
), GFP_ATOMIC
);
1149 nd
->state
= ncsi_dev_state_registered
;
1151 nd
->handler
= handler
;
1152 ndp
->pending_req_num
= 0;
1153 INIT_LIST_HEAD(&ndp
->channel_queue
);
1154 INIT_WORK(&ndp
->work
, ncsi_dev_work
);
1156 /* Initialize private NCSI device */
1157 spin_lock_init(&ndp
->lock
);
1158 INIT_LIST_HEAD(&ndp
->packages
);
1159 ndp
->request_id
= NCSI_REQ_START_IDX
;
1160 for (i
= 0; i
< ARRAY_SIZE(ndp
->requests
); i
++) {
1161 ndp
->requests
[i
].id
= i
;
1162 ndp
->requests
[i
].ndp
= ndp
;
1163 setup_timer(&ndp
->requests
[i
].timer
,
1164 ncsi_request_timeout
,
1165 (unsigned long)&ndp
->requests
[i
]);
1168 spin_lock_irqsave(&ncsi_dev_lock
, flags
);
1169 #if IS_ENABLED(CONFIG_IPV6)
1170 ndp
->inet6_addr_num
= 0;
1171 if (list_empty(&ncsi_dev_list
))
1172 register_inet6addr_notifier(&ncsi_inet6addr_notifier
);
1174 list_add_tail_rcu(&ndp
->node
, &ncsi_dev_list
);
1175 spin_unlock_irqrestore(&ncsi_dev_lock
, flags
);
1177 /* Register NCSI packet Rx handler */
1178 ndp
->ptype
.type
= cpu_to_be16(ETH_P_NCSI
);
1179 ndp
->ptype
.func
= ncsi_rcv_rsp
;
1180 ndp
->ptype
.dev
= dev
;
1181 dev_add_pack(&ndp
->ptype
);
1185 EXPORT_SYMBOL_GPL(ncsi_register_dev
);
1187 int ncsi_start_dev(struct ncsi_dev
*nd
)
1189 struct ncsi_dev_priv
*ndp
= TO_NCSI_DEV_PRIV(nd
);
1190 struct ncsi_package
*np
;
1191 struct ncsi_channel
*nc
;
1192 unsigned long flags
;
1196 if (nd
->state
!= ncsi_dev_state_registered
&&
1197 nd
->state
!= ncsi_dev_state_functional
)
1200 if (!(ndp
->flags
& NCSI_DEV_PROBED
)) {
1201 nd
->state
= ncsi_dev_state_probe
;
1202 schedule_work(&ndp
->work
);
1206 /* Reset channel's state and start over */
1207 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
1208 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
1209 spin_lock_irqsave(&nc
->lock
, flags
);
1210 chained
= !list_empty(&nc
->link
);
1211 old_state
= nc
->state
;
1212 nc
->state
= NCSI_CHANNEL_INACTIVE
;
1213 spin_unlock_irqrestore(&nc
->lock
, flags
);
1215 WARN_ON_ONCE(chained
||
1216 old_state
== NCSI_CHANNEL_INVISIBLE
);
1220 if (ndp
->flags
& NCSI_DEV_HWA
)
1221 ret
= ncsi_enable_hwa(ndp
);
1223 ret
= ncsi_choose_active_channel(ndp
);
1227 EXPORT_SYMBOL_GPL(ncsi_start_dev
);
1229 void ncsi_unregister_dev(struct ncsi_dev
*nd
)
1231 struct ncsi_dev_priv
*ndp
= TO_NCSI_DEV_PRIV(nd
);
1232 struct ncsi_package
*np
, *tmp
;
1233 unsigned long flags
;
1235 dev_remove_pack(&ndp
->ptype
);
1237 list_for_each_entry_safe(np
, tmp
, &ndp
->packages
, node
)
1238 ncsi_remove_package(np
);
1240 spin_lock_irqsave(&ncsi_dev_lock
, flags
);
1241 list_del_rcu(&ndp
->node
);
1242 #if IS_ENABLED(CONFIG_IPV6)
1243 if (list_empty(&ncsi_dev_list
))
1244 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier
);
1246 spin_unlock_irqrestore(&ncsi_dev_lock
, flags
);
1250 EXPORT_SYMBOL_GPL(ncsi_unregister_dev
);