/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
18 #include <net/net_namespace.h>
20 #include <net/addrconf.h>
22 #include <net/if_inet6.h>
27 LIST_HEAD(ncsi_dev_list
);
28 DEFINE_SPINLOCK(ncsi_dev_lock
);
30 static inline int ncsi_filter_size(int table
)
32 int sizes
[] = { 2, 6, 6, 6 };
34 BUILD_BUG_ON(ARRAY_SIZE(sizes
) != NCSI_FILTER_MAX
);
35 if (table
< NCSI_FILTER_BASE
|| table
>= NCSI_FILTER_MAX
)
41 int ncsi_find_filter(struct ncsi_channel
*nc
, int table
, void *data
)
43 struct ncsi_channel_filter
*ncf
;
48 ncf
= nc
->filters
[table
];
52 size
= ncsi_filter_size(table
);
56 spin_lock_irqsave(&nc
->lock
, flags
);
57 bitmap
= (void *)&ncf
->bitmap
;
59 while ((index
= find_next_bit(bitmap
, ncf
->total
, index
+ 1))
61 if (!memcmp(ncf
->data
+ size
* index
, data
, size
)) {
62 spin_unlock_irqrestore(&nc
->lock
, flags
);
66 spin_unlock_irqrestore(&nc
->lock
, flags
);
71 int ncsi_add_filter(struct ncsi_channel
*nc
, int table
, void *data
)
73 struct ncsi_channel_filter
*ncf
;
78 size
= ncsi_filter_size(table
);
82 index
= ncsi_find_filter(nc
, table
, data
);
86 ncf
= nc
->filters
[table
];
90 spin_lock_irqsave(&nc
->lock
, flags
);
91 bitmap
= (void *)&ncf
->bitmap
;
93 index
= find_next_zero_bit(bitmap
, ncf
->total
, 0);
94 if (index
>= ncf
->total
) {
95 spin_unlock_irqrestore(&nc
->lock
, flags
);
98 } while (test_and_set_bit(index
, bitmap
));
100 memcpy(ncf
->data
+ size
* index
, data
, size
);
101 spin_unlock_irqrestore(&nc
->lock
, flags
);
106 int ncsi_remove_filter(struct ncsi_channel
*nc
, int table
, int index
)
108 struct ncsi_channel_filter
*ncf
;
113 size
= ncsi_filter_size(table
);
117 ncf
= nc
->filters
[table
];
118 if (!ncf
|| index
>= ncf
->total
)
121 spin_lock_irqsave(&nc
->lock
, flags
);
122 bitmap
= (void *)&ncf
->bitmap
;
123 if (test_and_clear_bit(index
, bitmap
))
124 memset(ncf
->data
+ size
* index
, 0, size
);
125 spin_unlock_irqrestore(&nc
->lock
, flags
);
130 static void ncsi_report_link(struct ncsi_dev_priv
*ndp
, bool force_down
)
132 struct ncsi_dev
*nd
= &ndp
->ndev
;
133 struct ncsi_package
*np
;
134 struct ncsi_channel
*nc
;
137 nd
->state
= ncsi_dev_state_functional
;
144 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
145 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
146 spin_lock_irqsave(&nc
->lock
, flags
);
148 if (!list_empty(&nc
->link
) ||
149 nc
->state
!= NCSI_CHANNEL_ACTIVE
) {
150 spin_unlock_irqrestore(&nc
->lock
, flags
);
154 if (nc
->modes
[NCSI_MODE_LINK
].data
[2] & 0x1) {
155 spin_unlock_irqrestore(&nc
->lock
, flags
);
160 spin_unlock_irqrestore(&nc
->lock
, flags
);
168 static void ncsi_channel_monitor(unsigned long data
)
170 struct ncsi_channel
*nc
= (struct ncsi_channel
*)data
;
171 struct ncsi_package
*np
= nc
->package
;
172 struct ncsi_dev_priv
*ndp
= np
->ndp
;
173 struct ncsi_cmd_arg nca
;
174 bool enabled
, chained
;
175 unsigned int timeout
;
179 spin_lock_irqsave(&nc
->lock
, flags
);
181 chained
= !list_empty(&nc
->link
);
182 timeout
= nc
->timeout
;
183 enabled
= nc
->enabled
;
184 spin_unlock_irqrestore(&nc
->lock
, flags
);
186 if (!enabled
|| chained
)
188 if (state
!= NCSI_CHANNEL_INACTIVE
&&
189 state
!= NCSI_CHANNEL_ACTIVE
)
192 if (!(timeout
% 2)) {
194 nca
.package
= np
->id
;
195 nca
.channel
= nc
->id
;
196 nca
.type
= NCSI_PKT_CMD_GLS
;
198 ret
= ncsi_xmit_cmd(&nca
);
200 netdev_err(ndp
->ndev
.dev
, "Error %d sending GLS\n",
206 if (timeout
+ 1 >= 3) {
207 if (!(ndp
->flags
& NCSI_DEV_HWA
) &&
208 state
== NCSI_CHANNEL_ACTIVE
)
209 ncsi_report_link(ndp
, true);
211 spin_lock_irqsave(&nc
->lock
, flags
);
212 nc
->state
= NCSI_CHANNEL_INVISIBLE
;
213 spin_unlock_irqrestore(&nc
->lock
, flags
);
215 spin_lock_irqsave(&ndp
->lock
, flags
);
216 nc
->state
= NCSI_CHANNEL_INACTIVE
;
217 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
218 spin_unlock_irqrestore(&ndp
->lock
, flags
);
219 ncsi_process_next_channel(ndp
);
223 spin_lock_irqsave(&nc
->lock
, flags
);
224 nc
->timeout
= timeout
+ 1;
226 spin_unlock_irqrestore(&nc
->lock
, flags
);
227 mod_timer(&nc
->timer
, jiffies
+ HZ
* (1 << (nc
->timeout
/ 2)));
230 void ncsi_start_channel_monitor(struct ncsi_channel
*nc
)
234 spin_lock_irqsave(&nc
->lock
, flags
);
235 WARN_ON_ONCE(nc
->enabled
);
238 spin_unlock_irqrestore(&nc
->lock
, flags
);
240 mod_timer(&nc
->timer
, jiffies
+ HZ
* (1 << (nc
->timeout
/ 2)));
243 void ncsi_stop_channel_monitor(struct ncsi_channel
*nc
)
247 spin_lock_irqsave(&nc
->lock
, flags
);
249 spin_unlock_irqrestore(&nc
->lock
, flags
);
253 spin_unlock_irqrestore(&nc
->lock
, flags
);
255 del_timer_sync(&nc
->timer
);
258 struct ncsi_channel
*ncsi_find_channel(struct ncsi_package
*np
,
261 struct ncsi_channel
*nc
;
263 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
271 struct ncsi_channel
*ncsi_add_channel(struct ncsi_package
*np
, unsigned char id
)
273 struct ncsi_channel
*nc
, *tmp
;
277 nc
= kzalloc(sizeof(*nc
), GFP_ATOMIC
);
283 nc
->state
= NCSI_CHANNEL_INACTIVE
;
285 setup_timer(&nc
->timer
, ncsi_channel_monitor
, (unsigned long)nc
);
286 spin_lock_init(&nc
->lock
);
287 INIT_LIST_HEAD(&nc
->link
);
288 for (index
= 0; index
< NCSI_CAP_MAX
; index
++)
289 nc
->caps
[index
].index
= index
;
290 for (index
= 0; index
< NCSI_MODE_MAX
; index
++)
291 nc
->modes
[index
].index
= index
;
293 spin_lock_irqsave(&np
->lock
, flags
);
294 tmp
= ncsi_find_channel(np
, id
);
296 spin_unlock_irqrestore(&np
->lock
, flags
);
301 list_add_tail_rcu(&nc
->node
, &np
->channels
);
303 spin_unlock_irqrestore(&np
->lock
, flags
);
308 static void ncsi_remove_channel(struct ncsi_channel
*nc
)
310 struct ncsi_package
*np
= nc
->package
;
311 struct ncsi_channel_filter
*ncf
;
315 /* Release filters */
316 spin_lock_irqsave(&nc
->lock
, flags
);
317 for (i
= 0; i
< NCSI_FILTER_MAX
; i
++) {
318 ncf
= nc
->filters
[i
];
322 nc
->filters
[i
] = NULL
;
326 nc
->state
= NCSI_CHANNEL_INACTIVE
;
327 spin_unlock_irqrestore(&nc
->lock
, flags
);
328 ncsi_stop_channel_monitor(nc
);
330 /* Remove and free channel */
331 spin_lock_irqsave(&np
->lock
, flags
);
332 list_del_rcu(&nc
->node
);
334 spin_unlock_irqrestore(&np
->lock
, flags
);
339 struct ncsi_package
*ncsi_find_package(struct ncsi_dev_priv
*ndp
,
342 struct ncsi_package
*np
;
344 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
352 struct ncsi_package
*ncsi_add_package(struct ncsi_dev_priv
*ndp
,
355 struct ncsi_package
*np
, *tmp
;
358 np
= kzalloc(sizeof(*np
), GFP_ATOMIC
);
364 spin_lock_init(&np
->lock
);
365 INIT_LIST_HEAD(&np
->channels
);
367 spin_lock_irqsave(&ndp
->lock
, flags
);
368 tmp
= ncsi_find_package(ndp
, id
);
370 spin_unlock_irqrestore(&ndp
->lock
, flags
);
375 list_add_tail_rcu(&np
->node
, &ndp
->packages
);
377 spin_unlock_irqrestore(&ndp
->lock
, flags
);
382 void ncsi_remove_package(struct ncsi_package
*np
)
384 struct ncsi_dev_priv
*ndp
= np
->ndp
;
385 struct ncsi_channel
*nc
, *tmp
;
388 /* Release all child channels */
389 list_for_each_entry_safe(nc
, tmp
, &np
->channels
, node
)
390 ncsi_remove_channel(nc
);
392 /* Remove and free package */
393 spin_lock_irqsave(&ndp
->lock
, flags
);
394 list_del_rcu(&np
->node
);
396 spin_unlock_irqrestore(&ndp
->lock
, flags
);
401 void ncsi_find_package_and_channel(struct ncsi_dev_priv
*ndp
,
403 struct ncsi_package
**np
,
404 struct ncsi_channel
**nc
)
406 struct ncsi_package
*p
;
407 struct ncsi_channel
*c
;
409 p
= ncsi_find_package(ndp
, NCSI_PACKAGE_INDEX(id
));
410 c
= p
? ncsi_find_channel(p
, NCSI_CHANNEL_INDEX(id
)) : NULL
;
418 /* For two consecutive NCSI commands, the packet IDs shouldn't
419 * be same. Otherwise, the bogus response might be replied. So
420 * the available IDs are allocated in round-robin fashion.
422 struct ncsi_request
*ncsi_alloc_request(struct ncsi_dev_priv
*ndp
, bool driven
)
424 struct ncsi_request
*nr
= NULL
;
425 int i
, limit
= ARRAY_SIZE(ndp
->requests
);
428 /* Check if there is one available request until the ceiling */
429 spin_lock_irqsave(&ndp
->lock
, flags
);
430 for (i
= ndp
->request_id
; i
< limit
; i
++) {
431 if (ndp
->requests
[i
].used
)
434 nr
= &ndp
->requests
[i
];
437 ndp
->request_id
= i
+ 1;
441 /* Fail back to check from the starting cursor */
442 for (i
= NCSI_REQ_START_IDX
; i
< ndp
->request_id
; i
++) {
443 if (ndp
->requests
[i
].used
)
446 nr
= &ndp
->requests
[i
];
449 ndp
->request_id
= i
+ 1;
454 spin_unlock_irqrestore(&ndp
->lock
, flags
);
458 void ncsi_free_request(struct ncsi_request
*nr
)
460 struct ncsi_dev_priv
*ndp
= nr
->ndp
;
461 struct sk_buff
*cmd
, *rsp
;
467 del_timer_sync(&nr
->timer
);
470 spin_lock_irqsave(&ndp
->lock
, flags
);
477 spin_unlock_irqrestore(&ndp
->lock
, flags
);
479 if (driven
&& cmd
&& --ndp
->pending_req_num
== 0)
480 schedule_work(&ndp
->work
);
482 /* Release command and response */
487 struct ncsi_dev
*ncsi_find_dev(struct net_device
*dev
)
489 struct ncsi_dev_priv
*ndp
;
491 NCSI_FOR_EACH_DEV(ndp
) {
492 if (ndp
->ndev
.dev
== dev
)
499 static void ncsi_request_timeout(unsigned long data
)
501 struct ncsi_request
*nr
= (struct ncsi_request
*)data
;
502 struct ncsi_dev_priv
*ndp
= nr
->ndp
;
505 /* If the request already had associated response,
506 * let the response handler to release it.
508 spin_lock_irqsave(&ndp
->lock
, flags
);
510 if (nr
->rsp
|| !nr
->cmd
) {
511 spin_unlock_irqrestore(&ndp
->lock
, flags
);
514 spin_unlock_irqrestore(&ndp
->lock
, flags
);
516 /* Release the request */
517 ncsi_free_request(nr
);
520 static void ncsi_suspend_channel(struct ncsi_dev_priv
*ndp
)
522 struct ncsi_dev
*nd
= &ndp
->ndev
;
523 struct ncsi_package
*np
= ndp
->active_package
;
524 struct ncsi_channel
*nc
= ndp
->active_channel
;
525 struct ncsi_cmd_arg nca
;
532 case ncsi_dev_state_suspend
:
533 nd
->state
= ncsi_dev_state_suspend_select
;
535 case ncsi_dev_state_suspend_select
:
536 case ncsi_dev_state_suspend_dcnt
:
537 case ncsi_dev_state_suspend_dc
:
538 case ncsi_dev_state_suspend_deselect
:
539 ndp
->pending_req_num
= 1;
541 np
= ndp
->active_package
;
542 nc
= ndp
->active_channel
;
543 nca
.package
= np
->id
;
544 if (nd
->state
== ncsi_dev_state_suspend_select
) {
545 nca
.type
= NCSI_PKT_CMD_SP
;
546 nca
.channel
= NCSI_RESERVED_CHANNEL
;
547 if (ndp
->flags
& NCSI_DEV_HWA
)
551 nd
->state
= ncsi_dev_state_suspend_dcnt
;
552 } else if (nd
->state
== ncsi_dev_state_suspend_dcnt
) {
553 nca
.type
= NCSI_PKT_CMD_DCNT
;
554 nca
.channel
= nc
->id
;
555 nd
->state
= ncsi_dev_state_suspend_dc
;
556 } else if (nd
->state
== ncsi_dev_state_suspend_dc
) {
557 nca
.type
= NCSI_PKT_CMD_DC
;
558 nca
.channel
= nc
->id
;
560 nd
->state
= ncsi_dev_state_suspend_deselect
;
561 } else if (nd
->state
== ncsi_dev_state_suspend_deselect
) {
562 nca
.type
= NCSI_PKT_CMD_DP
;
563 nca
.channel
= NCSI_RESERVED_CHANNEL
;
564 nd
->state
= ncsi_dev_state_suspend_done
;
567 ret
= ncsi_xmit_cmd(&nca
);
569 nd
->state
= ncsi_dev_state_functional
;
574 case ncsi_dev_state_suspend_done
:
575 spin_lock_irqsave(&nc
->lock
, flags
);
576 nc
->state
= NCSI_CHANNEL_INACTIVE
;
577 spin_unlock_irqrestore(&nc
->lock
, flags
);
578 ncsi_process_next_channel(ndp
);
582 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%x in suspend\n",
587 static void ncsi_configure_channel(struct ncsi_dev_priv
*ndp
)
589 struct ncsi_dev
*nd
= &ndp
->ndev
;
590 struct net_device
*dev
= nd
->dev
;
591 struct ncsi_package
*np
= ndp
->active_package
;
592 struct ncsi_channel
*nc
= ndp
->active_channel
;
593 struct ncsi_cmd_arg nca
;
601 case ncsi_dev_state_config
:
602 case ncsi_dev_state_config_sp
:
603 ndp
->pending_req_num
= 1;
605 /* Select the specific package */
606 nca
.type
= NCSI_PKT_CMD_SP
;
607 if (ndp
->flags
& NCSI_DEV_HWA
)
611 nca
.package
= np
->id
;
612 nca
.channel
= NCSI_RESERVED_CHANNEL
;
613 ret
= ncsi_xmit_cmd(&nca
);
617 nd
->state
= ncsi_dev_state_config_cis
;
619 case ncsi_dev_state_config_cis
:
620 ndp
->pending_req_num
= 1;
622 /* Clear initial state */
623 nca
.type
= NCSI_PKT_CMD_CIS
;
624 nca
.package
= np
->id
;
625 nca
.channel
= nc
->id
;
626 ret
= ncsi_xmit_cmd(&nca
);
630 nd
->state
= ncsi_dev_state_config_sma
;
632 case ncsi_dev_state_config_sma
:
633 case ncsi_dev_state_config_ebf
:
634 #if IS_ENABLED(CONFIG_IPV6)
635 case ncsi_dev_state_config_egmf
:
637 case ncsi_dev_state_config_ecnt
:
638 case ncsi_dev_state_config_ec
:
639 case ncsi_dev_state_config_ae
:
640 case ncsi_dev_state_config_gls
:
641 ndp
->pending_req_num
= 1;
643 nca
.package
= np
->id
;
644 nca
.channel
= nc
->id
;
646 /* Use first entry in unicast filter table. Note that
647 * the MAC filter table starts from entry 1 instead of
650 if (nd
->state
== ncsi_dev_state_config_sma
) {
651 nca
.type
= NCSI_PKT_CMD_SMA
;
652 for (index
= 0; index
< 6; index
++)
653 nca
.bytes
[index
] = dev
->dev_addr
[index
];
656 nd
->state
= ncsi_dev_state_config_ebf
;
657 } else if (nd
->state
== ncsi_dev_state_config_ebf
) {
658 nca
.type
= NCSI_PKT_CMD_EBF
;
659 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_BC
].cap
;
660 nd
->state
= ncsi_dev_state_config_ecnt
;
661 #if IS_ENABLED(CONFIG_IPV6)
662 if (ndp
->inet6_addr_num
> 0 &&
663 (nc
->caps
[NCSI_CAP_GENERIC
].cap
&
664 NCSI_CAP_GENERIC_MC
))
665 nd
->state
= ncsi_dev_state_config_egmf
;
667 nd
->state
= ncsi_dev_state_config_ecnt
;
668 } else if (nd
->state
== ncsi_dev_state_config_egmf
) {
669 nca
.type
= NCSI_PKT_CMD_EGMF
;
670 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_MC
].cap
;
671 nd
->state
= ncsi_dev_state_config_ecnt
;
672 #endif /* CONFIG_IPV6 */
673 } else if (nd
->state
== ncsi_dev_state_config_ecnt
) {
674 nca
.type
= NCSI_PKT_CMD_ECNT
;
675 nd
->state
= ncsi_dev_state_config_ec
;
676 } else if (nd
->state
== ncsi_dev_state_config_ec
) {
677 /* Enable AEN if it's supported */
678 nca
.type
= NCSI_PKT_CMD_EC
;
679 nd
->state
= ncsi_dev_state_config_ae
;
680 if (!(nc
->caps
[NCSI_CAP_AEN
].cap
& NCSI_CAP_AEN_MASK
))
681 nd
->state
= ncsi_dev_state_config_gls
;
682 } else if (nd
->state
== ncsi_dev_state_config_ae
) {
683 nca
.type
= NCSI_PKT_CMD_AE
;
685 nca
.dwords
[1] = nc
->caps
[NCSI_CAP_AEN
].cap
;
686 nd
->state
= ncsi_dev_state_config_gls
;
687 } else if (nd
->state
== ncsi_dev_state_config_gls
) {
688 nca
.type
= NCSI_PKT_CMD_GLS
;
689 nd
->state
= ncsi_dev_state_config_done
;
692 ret
= ncsi_xmit_cmd(&nca
);
696 case ncsi_dev_state_config_done
:
697 spin_lock_irqsave(&nc
->lock
, flags
);
698 if (nc
->modes
[NCSI_MODE_LINK
].data
[2] & 0x1)
699 nc
->state
= NCSI_CHANNEL_ACTIVE
;
701 nc
->state
= NCSI_CHANNEL_INACTIVE
;
702 spin_unlock_irqrestore(&nc
->lock
, flags
);
704 ncsi_start_channel_monitor(nc
);
705 ncsi_process_next_channel(ndp
);
708 netdev_warn(dev
, "Wrong NCSI state 0x%x in config\n",
715 ncsi_report_link(ndp
, true);
718 static int ncsi_choose_active_channel(struct ncsi_dev_priv
*ndp
)
720 struct ncsi_package
*np
;
721 struct ncsi_channel
*nc
, *found
;
722 struct ncsi_channel_mode
*ncm
;
725 /* The search is done once an inactive channel with up
729 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
730 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
731 spin_lock_irqsave(&nc
->lock
, flags
);
733 if (!list_empty(&nc
->link
) ||
734 nc
->state
!= NCSI_CHANNEL_INACTIVE
) {
735 spin_unlock_irqrestore(&nc
->lock
, flags
);
742 ncm
= &nc
->modes
[NCSI_MODE_LINK
];
743 if (ncm
->data
[2] & 0x1) {
744 spin_unlock_irqrestore(&nc
->lock
, flags
);
749 spin_unlock_irqrestore(&nc
->lock
, flags
);
754 ncsi_report_link(ndp
, true);
759 spin_lock_irqsave(&ndp
->lock
, flags
);
760 list_add_tail_rcu(&found
->link
, &ndp
->channel_queue
);
761 spin_unlock_irqrestore(&ndp
->lock
, flags
);
763 return ncsi_process_next_channel(ndp
);
766 static bool ncsi_check_hwa(struct ncsi_dev_priv
*ndp
)
768 struct ncsi_package
*np
;
769 struct ncsi_channel
*nc
;
772 /* The hardware arbitration is disabled if any one channel
773 * doesn't support explicitly.
775 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
776 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
777 cap
= nc
->caps
[NCSI_CAP_GENERIC
].cap
;
778 if (!(cap
& NCSI_CAP_GENERIC_HWA
) ||
779 (cap
& NCSI_CAP_GENERIC_HWA_MASK
) !=
780 NCSI_CAP_GENERIC_HWA_SUPPORT
) {
781 ndp
->flags
&= ~NCSI_DEV_HWA
;
787 ndp
->flags
|= NCSI_DEV_HWA
;
791 static int ncsi_enable_hwa(struct ncsi_dev_priv
*ndp
)
793 struct ncsi_package
*np
;
794 struct ncsi_channel
*nc
;
797 /* Move all available channels to processing queue */
798 spin_lock_irqsave(&ndp
->lock
, flags
);
799 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
800 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
801 WARN_ON_ONCE(nc
->state
!= NCSI_CHANNEL_INACTIVE
||
802 !list_empty(&nc
->link
));
803 ncsi_stop_channel_monitor(nc
);
804 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
807 spin_unlock_irqrestore(&ndp
->lock
, flags
);
809 /* We can have no channels in extremely case */
810 if (list_empty(&ndp
->channel_queue
)) {
811 ncsi_report_link(ndp
, false);
815 return ncsi_process_next_channel(ndp
);
818 static void ncsi_probe_channel(struct ncsi_dev_priv
*ndp
)
820 struct ncsi_dev
*nd
= &ndp
->ndev
;
821 struct ncsi_package
*np
;
822 struct ncsi_channel
*nc
;
823 struct ncsi_cmd_arg nca
;
830 case ncsi_dev_state_probe
:
831 nd
->state
= ncsi_dev_state_probe_deselect
;
833 case ncsi_dev_state_probe_deselect
:
834 ndp
->pending_req_num
= 8;
836 /* Deselect all possible packages */
837 nca
.type
= NCSI_PKT_CMD_DP
;
838 nca
.channel
= NCSI_RESERVED_CHANNEL
;
839 for (index
= 0; index
< 8; index
++) {
841 ret
= ncsi_xmit_cmd(&nca
);
846 nd
->state
= ncsi_dev_state_probe_package
;
848 case ncsi_dev_state_probe_package
:
849 ndp
->pending_req_num
= 16;
851 /* Select all possible packages */
852 nca
.type
= NCSI_PKT_CMD_SP
;
854 nca
.channel
= NCSI_RESERVED_CHANNEL
;
855 for (index
= 0; index
< 8; index
++) {
857 ret
= ncsi_xmit_cmd(&nca
);
862 /* Disable all possible packages */
863 nca
.type
= NCSI_PKT_CMD_DP
;
864 for (index
= 0; index
< 8; index
++) {
866 ret
= ncsi_xmit_cmd(&nca
);
871 nd
->state
= ncsi_dev_state_probe_channel
;
873 case ncsi_dev_state_probe_channel
:
874 if (!ndp
->active_package
)
875 ndp
->active_package
= list_first_or_null_rcu(
876 &ndp
->packages
, struct ncsi_package
, node
);
877 else if (list_is_last(&ndp
->active_package
->node
,
879 ndp
->active_package
= NULL
;
881 ndp
->active_package
= list_next_entry(
882 ndp
->active_package
, node
);
884 /* All available packages and channels are enumerated. The
885 * enumeration happens for once when the NCSI interface is
886 * started. So we need continue to start the interface after
889 * We have to choose an active channel before configuring it.
890 * Note that we possibly don't have active channel in extreme
893 if (!ndp
->active_package
) {
894 ndp
->flags
|= NCSI_DEV_PROBED
;
895 if (ncsi_check_hwa(ndp
))
896 ncsi_enable_hwa(ndp
);
898 ncsi_choose_active_channel(ndp
);
902 /* Select the active package */
903 ndp
->pending_req_num
= 1;
904 nca
.type
= NCSI_PKT_CMD_SP
;
906 nca
.package
= ndp
->active_package
->id
;
907 nca
.channel
= NCSI_RESERVED_CHANNEL
;
908 ret
= ncsi_xmit_cmd(&nca
);
912 nd
->state
= ncsi_dev_state_probe_cis
;
914 case ncsi_dev_state_probe_cis
:
915 ndp
->pending_req_num
= NCSI_RESERVED_CHANNEL
;
917 /* Clear initial state */
918 nca
.type
= NCSI_PKT_CMD_CIS
;
919 nca
.package
= ndp
->active_package
->id
;
920 for (index
= 0; index
< NCSI_RESERVED_CHANNEL
; index
++) {
922 ret
= ncsi_xmit_cmd(&nca
);
927 nd
->state
= ncsi_dev_state_probe_gvi
;
929 case ncsi_dev_state_probe_gvi
:
930 case ncsi_dev_state_probe_gc
:
931 case ncsi_dev_state_probe_gls
:
932 np
= ndp
->active_package
;
933 ndp
->pending_req_num
= np
->channel_num
;
935 /* Retrieve version, capability or link status */
936 if (nd
->state
== ncsi_dev_state_probe_gvi
)
937 nca
.type
= NCSI_PKT_CMD_GVI
;
938 else if (nd
->state
== ncsi_dev_state_probe_gc
)
939 nca
.type
= NCSI_PKT_CMD_GC
;
941 nca
.type
= NCSI_PKT_CMD_GLS
;
943 nca
.package
= np
->id
;
944 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
945 nca
.channel
= nc
->id
;
946 ret
= ncsi_xmit_cmd(&nca
);
951 if (nd
->state
== ncsi_dev_state_probe_gvi
)
952 nd
->state
= ncsi_dev_state_probe_gc
;
953 else if (nd
->state
== ncsi_dev_state_probe_gc
)
954 nd
->state
= ncsi_dev_state_probe_gls
;
956 nd
->state
= ncsi_dev_state_probe_dp
;
958 case ncsi_dev_state_probe_dp
:
959 ndp
->pending_req_num
= 1;
961 /* Deselect the active package */
962 nca
.type
= NCSI_PKT_CMD_DP
;
963 nca
.package
= ndp
->active_package
->id
;
964 nca
.channel
= NCSI_RESERVED_CHANNEL
;
965 ret
= ncsi_xmit_cmd(&nca
);
969 /* Scan channels in next package */
970 nd
->state
= ncsi_dev_state_probe_channel
;
973 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%0x in enumeration\n",
979 ncsi_report_link(ndp
, true);
982 static void ncsi_dev_work(struct work_struct
*work
)
984 struct ncsi_dev_priv
*ndp
= container_of(work
,
985 struct ncsi_dev_priv
, work
);
986 struct ncsi_dev
*nd
= &ndp
->ndev
;
988 switch (nd
->state
& ncsi_dev_state_major
) {
989 case ncsi_dev_state_probe
:
990 ncsi_probe_channel(ndp
);
992 case ncsi_dev_state_suspend
:
993 ncsi_suspend_channel(ndp
);
995 case ncsi_dev_state_config
:
996 ncsi_configure_channel(ndp
);
999 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%x in workqueue\n",
1004 int ncsi_process_next_channel(struct ncsi_dev_priv
*ndp
)
1006 struct ncsi_channel
*nc
;
1008 unsigned long flags
;
1010 spin_lock_irqsave(&ndp
->lock
, flags
);
1011 nc
= list_first_or_null_rcu(&ndp
->channel_queue
,
1012 struct ncsi_channel
, link
);
1014 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1018 list_del_init(&nc
->link
);
1019 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1021 spin_lock_irqsave(&nc
->lock
, flags
);
1022 old_state
= nc
->state
;
1023 nc
->state
= NCSI_CHANNEL_INVISIBLE
;
1024 spin_unlock_irqrestore(&nc
->lock
, flags
);
1026 ndp
->active_channel
= nc
;
1027 ndp
->active_package
= nc
->package
;
1029 switch (old_state
) {
1030 case NCSI_CHANNEL_INACTIVE
:
1031 ndp
->ndev
.state
= ncsi_dev_state_config
;
1032 ncsi_configure_channel(ndp
);
1034 case NCSI_CHANNEL_ACTIVE
:
1035 ndp
->ndev
.state
= ncsi_dev_state_suspend
;
1036 ncsi_suspend_channel(ndp
);
1039 netdev_err(ndp
->ndev
.dev
, "Invalid state 0x%x on %d:%d\n",
1040 old_state
, nc
->package
->id
, nc
->id
);
1041 ncsi_report_link(ndp
, false);
1048 ndp
->active_channel
= NULL
;
1049 ndp
->active_package
= NULL
;
1050 if (ndp
->flags
& NCSI_DEV_RESHUFFLE
) {
1051 ndp
->flags
&= ~NCSI_DEV_RESHUFFLE
;
1052 return ncsi_choose_active_channel(ndp
);
1055 ncsi_report_link(ndp
, false);
1059 #if IS_ENABLED(CONFIG_IPV6)
1060 static int ncsi_inet6addr_event(struct notifier_block
*this,
1061 unsigned long event
, void *data
)
1063 struct inet6_ifaddr
*ifa
= data
;
1064 struct net_device
*dev
= ifa
->idev
->dev
;
1065 struct ncsi_dev
*nd
= ncsi_find_dev(dev
);
1066 struct ncsi_dev_priv
*ndp
= nd
? TO_NCSI_DEV_PRIV(nd
) : NULL
;
1067 struct ncsi_package
*np
;
1068 struct ncsi_channel
*nc
;
1069 struct ncsi_cmd_arg nca
;
1073 if (!ndp
|| (ipv6_addr_type(&ifa
->addr
) &
1074 (IPV6_ADDR_LINKLOCAL
| IPV6_ADDR_LOOPBACK
)))
1079 action
= (++ndp
->inet6_addr_num
) == 1;
1080 nca
.type
= NCSI_PKT_CMD_EGMF
;
1083 action
= (--ndp
->inet6_addr_num
== 0);
1084 nca
.type
= NCSI_PKT_CMD_DGMF
;
1090 /* We might not have active channel or packages. The IPv6
1091 * required multicast will be enabled when active channel
1092 * or packages are chosen.
1094 np
= ndp
->active_package
;
1095 nc
= ndp
->active_channel
;
1096 if (!action
|| !np
|| !nc
)
1099 /* We needn't enable or disable it if the function isn't supported */
1100 if (!(nc
->caps
[NCSI_CAP_GENERIC
].cap
& NCSI_CAP_GENERIC_MC
))
1105 nca
.package
= np
->id
;
1106 nca
.channel
= nc
->id
;
1107 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_MC
].cap
;
1108 ret
= ncsi_xmit_cmd(&nca
);
1110 netdev_warn(dev
, "Fail to %s global multicast filter (%d)\n",
1111 (event
== NETDEV_UP
) ? "enable" : "disable", ret
);
1118 static struct notifier_block ncsi_inet6addr_notifier
= {
1119 .notifier_call
= ncsi_inet6addr_event
,
1121 #endif /* CONFIG_IPV6 */
1123 struct ncsi_dev
*ncsi_register_dev(struct net_device
*dev
,
1124 void (*handler
)(struct ncsi_dev
*ndev
))
1126 struct ncsi_dev_priv
*ndp
;
1127 struct ncsi_dev
*nd
;
1128 unsigned long flags
;
1131 /* Check if the device has been registered or not */
1132 nd
= ncsi_find_dev(dev
);
1136 /* Create NCSI device */
1137 ndp
= kzalloc(sizeof(*ndp
), GFP_ATOMIC
);
1142 nd
->state
= ncsi_dev_state_registered
;
1144 nd
->handler
= handler
;
1145 ndp
->pending_req_num
= 0;
1146 INIT_LIST_HEAD(&ndp
->channel_queue
);
1147 INIT_WORK(&ndp
->work
, ncsi_dev_work
);
1149 /* Initialize private NCSI device */
1150 spin_lock_init(&ndp
->lock
);
1151 INIT_LIST_HEAD(&ndp
->packages
);
1152 ndp
->request_id
= NCSI_REQ_START_IDX
;
1153 for (i
= 0; i
< ARRAY_SIZE(ndp
->requests
); i
++) {
1154 ndp
->requests
[i
].id
= i
;
1155 ndp
->requests
[i
].ndp
= ndp
;
1156 setup_timer(&ndp
->requests
[i
].timer
,
1157 ncsi_request_timeout
,
1158 (unsigned long)&ndp
->requests
[i
]);
1161 spin_lock_irqsave(&ncsi_dev_lock
, flags
);
1162 #if IS_ENABLED(CONFIG_IPV6)
1163 ndp
->inet6_addr_num
= 0;
1164 if (list_empty(&ncsi_dev_list
))
1165 register_inet6addr_notifier(&ncsi_inet6addr_notifier
);
1167 list_add_tail_rcu(&ndp
->node
, &ncsi_dev_list
);
1168 spin_unlock_irqrestore(&ncsi_dev_lock
, flags
);
1170 /* Register NCSI packet Rx handler */
1171 ndp
->ptype
.type
= cpu_to_be16(ETH_P_NCSI
);
1172 ndp
->ptype
.func
= ncsi_rcv_rsp
;
1173 ndp
->ptype
.dev
= dev
;
1174 dev_add_pack(&ndp
->ptype
);
1178 EXPORT_SYMBOL_GPL(ncsi_register_dev
);
1180 int ncsi_start_dev(struct ncsi_dev
*nd
)
1182 struct ncsi_dev_priv
*ndp
= TO_NCSI_DEV_PRIV(nd
);
1183 struct ncsi_package
*np
;
1184 struct ncsi_channel
*nc
;
1185 unsigned long flags
;
1189 if (nd
->state
!= ncsi_dev_state_registered
&&
1190 nd
->state
!= ncsi_dev_state_functional
)
1193 if (!(ndp
->flags
& NCSI_DEV_PROBED
)) {
1194 nd
->state
= ncsi_dev_state_probe
;
1195 schedule_work(&ndp
->work
);
1199 /* Reset channel's state and start over */
1200 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
1201 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
1202 spin_lock_irqsave(&nc
->lock
, flags
);
1203 chained
= !list_empty(&nc
->link
);
1204 old_state
= nc
->state
;
1205 nc
->state
= NCSI_CHANNEL_INACTIVE
;
1206 spin_unlock_irqrestore(&nc
->lock
, flags
);
1208 WARN_ON_ONCE(chained
||
1209 old_state
== NCSI_CHANNEL_INVISIBLE
);
1213 if (ndp
->flags
& NCSI_DEV_HWA
)
1214 ret
= ncsi_enable_hwa(ndp
);
1216 ret
= ncsi_choose_active_channel(ndp
);
1220 EXPORT_SYMBOL_GPL(ncsi_start_dev
);
1222 void ncsi_unregister_dev(struct ncsi_dev
*nd
)
1224 struct ncsi_dev_priv
*ndp
= TO_NCSI_DEV_PRIV(nd
);
1225 struct ncsi_package
*np
, *tmp
;
1226 unsigned long flags
;
1228 dev_remove_pack(&ndp
->ptype
);
1230 list_for_each_entry_safe(np
, tmp
, &ndp
->packages
, node
)
1231 ncsi_remove_package(np
);
1233 spin_lock_irqsave(&ncsi_dev_lock
, flags
);
1234 list_del_rcu(&ndp
->node
);
1235 #if IS_ENABLED(CONFIG_IPV6)
1236 if (list_empty(&ncsi_dev_list
))
1237 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier
);
1239 spin_unlock_irqrestore(&ncsi_dev_lock
, flags
);
1243 EXPORT_SYMBOL_GPL(ncsi_unregister_dev
);