/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
18 #include <net/net_namespace.h>
20 #include <net/addrconf.h>
22 #include <net/if_inet6.h>
/* Global registry of NCSI devices and the lock protecting it. */
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);
30 static inline int ncsi_filter_size(int table
)
32 int sizes
[] = { 2, 6, 6, 6 };
34 BUILD_BUG_ON(ARRAY_SIZE(sizes
) != NCSI_FILTER_MAX
);
35 if (table
< NCSI_FILTER_BASE
|| table
>= NCSI_FILTER_MAX
)
41 u32
*ncsi_get_filter(struct ncsi_channel
*nc
, int table
, int index
)
43 struct ncsi_channel_filter
*ncf
;
46 ncf
= nc
->filters
[table
];
50 size
= ncsi_filter_size(table
);
54 return ncf
->data
+ size
* index
;
57 /* Find the first active filter in a filter table that matches the given
58 * data parameter. If data is NULL, this returns the first active filter.
60 int ncsi_find_filter(struct ncsi_channel
*nc
, int table
, void *data
)
62 struct ncsi_channel_filter
*ncf
;
67 ncf
= nc
->filters
[table
];
71 size
= ncsi_filter_size(table
);
75 spin_lock_irqsave(&nc
->lock
, flags
);
76 bitmap
= (void *)&ncf
->bitmap
;
78 while ((index
= find_next_bit(bitmap
, ncf
->total
, index
+ 1))
80 if (!data
|| !memcmp(ncf
->data
+ size
* index
, data
, size
)) {
81 spin_unlock_irqrestore(&nc
->lock
, flags
);
85 spin_unlock_irqrestore(&nc
->lock
, flags
);
90 int ncsi_add_filter(struct ncsi_channel
*nc
, int table
, void *data
)
92 struct ncsi_channel_filter
*ncf
;
97 size
= ncsi_filter_size(table
);
101 index
= ncsi_find_filter(nc
, table
, data
);
105 ncf
= nc
->filters
[table
];
109 spin_lock_irqsave(&nc
->lock
, flags
);
110 bitmap
= (void *)&ncf
->bitmap
;
112 index
= find_next_zero_bit(bitmap
, ncf
->total
, 0);
113 if (index
>= ncf
->total
) {
114 spin_unlock_irqrestore(&nc
->lock
, flags
);
117 } while (test_and_set_bit(index
, bitmap
));
119 memcpy(ncf
->data
+ size
* index
, data
, size
);
120 spin_unlock_irqrestore(&nc
->lock
, flags
);
125 int ncsi_remove_filter(struct ncsi_channel
*nc
, int table
, int index
)
127 struct ncsi_channel_filter
*ncf
;
132 size
= ncsi_filter_size(table
);
136 ncf
= nc
->filters
[table
];
137 if (!ncf
|| index
>= ncf
->total
)
140 spin_lock_irqsave(&nc
->lock
, flags
);
141 bitmap
= (void *)&ncf
->bitmap
;
142 if (test_and_clear_bit(index
, bitmap
))
143 memset(ncf
->data
+ size
* index
, 0, size
);
144 spin_unlock_irqrestore(&nc
->lock
, flags
);
149 static void ncsi_report_link(struct ncsi_dev_priv
*ndp
, bool force_down
)
151 struct ncsi_dev
*nd
= &ndp
->ndev
;
152 struct ncsi_package
*np
;
153 struct ncsi_channel
*nc
;
156 nd
->state
= ncsi_dev_state_functional
;
163 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
164 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
165 spin_lock_irqsave(&nc
->lock
, flags
);
167 if (!list_empty(&nc
->link
) ||
168 nc
->state
!= NCSI_CHANNEL_ACTIVE
) {
169 spin_unlock_irqrestore(&nc
->lock
, flags
);
173 if (nc
->modes
[NCSI_MODE_LINK
].data
[2] & 0x1) {
174 spin_unlock_irqrestore(&nc
->lock
, flags
);
179 spin_unlock_irqrestore(&nc
->lock
, flags
);
187 static void ncsi_channel_monitor(unsigned long data
)
189 struct ncsi_channel
*nc
= (struct ncsi_channel
*)data
;
190 struct ncsi_package
*np
= nc
->package
;
191 struct ncsi_dev_priv
*ndp
= np
->ndp
;
192 struct ncsi_channel_mode
*ncm
;
193 struct ncsi_cmd_arg nca
;
194 bool enabled
, chained
;
195 unsigned int monitor_state
;
199 spin_lock_irqsave(&nc
->lock
, flags
);
201 chained
= !list_empty(&nc
->link
);
202 enabled
= nc
->monitor
.enabled
;
203 monitor_state
= nc
->monitor
.state
;
204 spin_unlock_irqrestore(&nc
->lock
, flags
);
206 if (!enabled
|| chained
) {
207 ncsi_stop_channel_monitor(nc
);
210 if (state
!= NCSI_CHANNEL_INACTIVE
&&
211 state
!= NCSI_CHANNEL_ACTIVE
) {
212 ncsi_stop_channel_monitor(nc
);
216 switch (monitor_state
) {
217 case NCSI_CHANNEL_MONITOR_START
:
218 case NCSI_CHANNEL_MONITOR_RETRY
:
220 nca
.package
= np
->id
;
221 nca
.channel
= nc
->id
;
222 nca
.type
= NCSI_PKT_CMD_GLS
;
224 ret
= ncsi_xmit_cmd(&nca
);
226 netdev_err(ndp
->ndev
.dev
, "Error %d sending GLS\n",
229 case NCSI_CHANNEL_MONITOR_WAIT
... NCSI_CHANNEL_MONITOR_WAIT_MAX
:
232 if (!(ndp
->flags
& NCSI_DEV_HWA
)) {
233 ncsi_report_link(ndp
, true);
234 ndp
->flags
|= NCSI_DEV_RESHUFFLE
;
237 ncsi_stop_channel_monitor(nc
);
239 ncm
= &nc
->modes
[NCSI_MODE_LINK
];
240 spin_lock_irqsave(&nc
->lock
, flags
);
241 nc
->state
= NCSI_CHANNEL_INVISIBLE
;
242 ncm
->data
[2] &= ~0x1;
243 spin_unlock_irqrestore(&nc
->lock
, flags
);
245 spin_lock_irqsave(&ndp
->lock
, flags
);
246 nc
->state
= NCSI_CHANNEL_ACTIVE
;
247 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
248 spin_unlock_irqrestore(&ndp
->lock
, flags
);
249 ncsi_process_next_channel(ndp
);
253 spin_lock_irqsave(&nc
->lock
, flags
);
255 spin_unlock_irqrestore(&nc
->lock
, flags
);
256 mod_timer(&nc
->monitor
.timer
, jiffies
+ HZ
);
259 void ncsi_start_channel_monitor(struct ncsi_channel
*nc
)
263 spin_lock_irqsave(&nc
->lock
, flags
);
264 WARN_ON_ONCE(nc
->monitor
.enabled
);
265 nc
->monitor
.enabled
= true;
266 nc
->monitor
.state
= NCSI_CHANNEL_MONITOR_START
;
267 spin_unlock_irqrestore(&nc
->lock
, flags
);
269 mod_timer(&nc
->monitor
.timer
, jiffies
+ HZ
);
272 void ncsi_stop_channel_monitor(struct ncsi_channel
*nc
)
276 spin_lock_irqsave(&nc
->lock
, flags
);
277 if (!nc
->monitor
.enabled
) {
278 spin_unlock_irqrestore(&nc
->lock
, flags
);
281 nc
->monitor
.enabled
= false;
282 spin_unlock_irqrestore(&nc
->lock
, flags
);
284 del_timer_sync(&nc
->monitor
.timer
);
287 struct ncsi_channel
*ncsi_find_channel(struct ncsi_package
*np
,
290 struct ncsi_channel
*nc
;
292 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
300 struct ncsi_channel
*ncsi_add_channel(struct ncsi_package
*np
, unsigned char id
)
302 struct ncsi_channel
*nc
, *tmp
;
306 nc
= kzalloc(sizeof(*nc
), GFP_ATOMIC
);
312 nc
->state
= NCSI_CHANNEL_INACTIVE
;
313 nc
->monitor
.enabled
= false;
314 setup_timer(&nc
->monitor
.timer
,
315 ncsi_channel_monitor
, (unsigned long)nc
);
316 spin_lock_init(&nc
->lock
);
317 INIT_LIST_HEAD(&nc
->link
);
318 for (index
= 0; index
< NCSI_CAP_MAX
; index
++)
319 nc
->caps
[index
].index
= index
;
320 for (index
= 0; index
< NCSI_MODE_MAX
; index
++)
321 nc
->modes
[index
].index
= index
;
323 spin_lock_irqsave(&np
->lock
, flags
);
324 tmp
= ncsi_find_channel(np
, id
);
326 spin_unlock_irqrestore(&np
->lock
, flags
);
331 list_add_tail_rcu(&nc
->node
, &np
->channels
);
333 spin_unlock_irqrestore(&np
->lock
, flags
);
338 static void ncsi_remove_channel(struct ncsi_channel
*nc
)
340 struct ncsi_package
*np
= nc
->package
;
341 struct ncsi_channel_filter
*ncf
;
345 /* Release filters */
346 spin_lock_irqsave(&nc
->lock
, flags
);
347 for (i
= 0; i
< NCSI_FILTER_MAX
; i
++) {
348 ncf
= nc
->filters
[i
];
352 nc
->filters
[i
] = NULL
;
356 nc
->state
= NCSI_CHANNEL_INACTIVE
;
357 spin_unlock_irqrestore(&nc
->lock
, flags
);
358 ncsi_stop_channel_monitor(nc
);
360 /* Remove and free channel */
361 spin_lock_irqsave(&np
->lock
, flags
);
362 list_del_rcu(&nc
->node
);
364 spin_unlock_irqrestore(&np
->lock
, flags
);
369 struct ncsi_package
*ncsi_find_package(struct ncsi_dev_priv
*ndp
,
372 struct ncsi_package
*np
;
374 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
382 struct ncsi_package
*ncsi_add_package(struct ncsi_dev_priv
*ndp
,
385 struct ncsi_package
*np
, *tmp
;
388 np
= kzalloc(sizeof(*np
), GFP_ATOMIC
);
394 spin_lock_init(&np
->lock
);
395 INIT_LIST_HEAD(&np
->channels
);
397 spin_lock_irqsave(&ndp
->lock
, flags
);
398 tmp
= ncsi_find_package(ndp
, id
);
400 spin_unlock_irqrestore(&ndp
->lock
, flags
);
405 list_add_tail_rcu(&np
->node
, &ndp
->packages
);
407 spin_unlock_irqrestore(&ndp
->lock
, flags
);
412 void ncsi_remove_package(struct ncsi_package
*np
)
414 struct ncsi_dev_priv
*ndp
= np
->ndp
;
415 struct ncsi_channel
*nc
, *tmp
;
418 /* Release all child channels */
419 list_for_each_entry_safe(nc
, tmp
, &np
->channels
, node
)
420 ncsi_remove_channel(nc
);
422 /* Remove and free package */
423 spin_lock_irqsave(&ndp
->lock
, flags
);
424 list_del_rcu(&np
->node
);
426 spin_unlock_irqrestore(&ndp
->lock
, flags
);
431 void ncsi_find_package_and_channel(struct ncsi_dev_priv
*ndp
,
433 struct ncsi_package
**np
,
434 struct ncsi_channel
**nc
)
436 struct ncsi_package
*p
;
437 struct ncsi_channel
*c
;
439 p
= ncsi_find_package(ndp
, NCSI_PACKAGE_INDEX(id
));
440 c
= p
? ncsi_find_channel(p
, NCSI_CHANNEL_INDEX(id
)) : NULL
;
448 /* For two consecutive NCSI commands, the packet IDs shouldn't
449 * be same. Otherwise, the bogus response might be replied. So
450 * the available IDs are allocated in round-robin fashion.
452 struct ncsi_request
*ncsi_alloc_request(struct ncsi_dev_priv
*ndp
,
453 unsigned int req_flags
)
455 struct ncsi_request
*nr
= NULL
;
456 int i
, limit
= ARRAY_SIZE(ndp
->requests
);
459 /* Check if there is one available request until the ceiling */
460 spin_lock_irqsave(&ndp
->lock
, flags
);
461 for (i
= ndp
->request_id
; i
< limit
; i
++) {
462 if (ndp
->requests
[i
].used
)
465 nr
= &ndp
->requests
[i
];
467 nr
->flags
= req_flags
;
468 ndp
->request_id
= i
+ 1;
472 /* Fail back to check from the starting cursor */
473 for (i
= NCSI_REQ_START_IDX
; i
< ndp
->request_id
; i
++) {
474 if (ndp
->requests
[i
].used
)
477 nr
= &ndp
->requests
[i
];
479 nr
->flags
= req_flags
;
480 ndp
->request_id
= i
+ 1;
485 spin_unlock_irqrestore(&ndp
->lock
, flags
);
489 void ncsi_free_request(struct ncsi_request
*nr
)
491 struct ncsi_dev_priv
*ndp
= nr
->ndp
;
492 struct sk_buff
*cmd
, *rsp
;
498 del_timer_sync(&nr
->timer
);
501 spin_lock_irqsave(&ndp
->lock
, flags
);
507 driven
= !!(nr
->flags
& NCSI_REQ_FLAG_EVENT_DRIVEN
);
508 spin_unlock_irqrestore(&ndp
->lock
, flags
);
510 if (driven
&& cmd
&& --ndp
->pending_req_num
== 0)
511 schedule_work(&ndp
->work
);
513 /* Release command and response */
518 struct ncsi_dev
*ncsi_find_dev(struct net_device
*dev
)
520 struct ncsi_dev_priv
*ndp
;
522 NCSI_FOR_EACH_DEV(ndp
) {
523 if (ndp
->ndev
.dev
== dev
)
530 static void ncsi_request_timeout(unsigned long data
)
532 struct ncsi_request
*nr
= (struct ncsi_request
*)data
;
533 struct ncsi_dev_priv
*ndp
= nr
->ndp
;
536 /* If the request already had associated response,
537 * let the response handler to release it.
539 spin_lock_irqsave(&ndp
->lock
, flags
);
541 if (nr
->rsp
|| !nr
->cmd
) {
542 spin_unlock_irqrestore(&ndp
->lock
, flags
);
545 spin_unlock_irqrestore(&ndp
->lock
, flags
);
547 /* Release the request */
548 ncsi_free_request(nr
);
551 static void ncsi_suspend_channel(struct ncsi_dev_priv
*ndp
)
553 struct ncsi_dev
*nd
= &ndp
->ndev
;
554 struct ncsi_package
*np
= ndp
->active_package
;
555 struct ncsi_channel
*nc
= ndp
->active_channel
;
556 struct ncsi_cmd_arg nca
;
561 nca
.req_flags
= NCSI_REQ_FLAG_EVENT_DRIVEN
;
563 case ncsi_dev_state_suspend
:
564 nd
->state
= ncsi_dev_state_suspend_select
;
566 case ncsi_dev_state_suspend_select
:
567 ndp
->pending_req_num
= 1;
569 nca
.type
= NCSI_PKT_CMD_SP
;
570 nca
.package
= np
->id
;
571 nca
.channel
= NCSI_RESERVED_CHANNEL
;
572 if (ndp
->flags
& NCSI_DEV_HWA
)
577 /* To retrieve the last link states of channels in current
578 * package when current active channel needs fail over to
579 * another one. It means we will possibly select another
580 * channel as next active one. The link states of channels
581 * are most important factor of the selection. So we need
582 * accurate link states. Unfortunately, the link states on
583 * inactive channels can't be updated with LSC AEN in time.
585 if (ndp
->flags
& NCSI_DEV_RESHUFFLE
)
586 nd
->state
= ncsi_dev_state_suspend_gls
;
588 nd
->state
= ncsi_dev_state_suspend_dcnt
;
589 ret
= ncsi_xmit_cmd(&nca
);
594 case ncsi_dev_state_suspend_gls
:
595 ndp
->pending_req_num
= np
->channel_num
;
597 nca
.type
= NCSI_PKT_CMD_GLS
;
598 nca
.package
= np
->id
;
600 nd
->state
= ncsi_dev_state_suspend_dcnt
;
601 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
602 nca
.channel
= nc
->id
;
603 ret
= ncsi_xmit_cmd(&nca
);
609 case ncsi_dev_state_suspend_dcnt
:
610 ndp
->pending_req_num
= 1;
612 nca
.type
= NCSI_PKT_CMD_DCNT
;
613 nca
.package
= np
->id
;
614 nca
.channel
= nc
->id
;
616 nd
->state
= ncsi_dev_state_suspend_dc
;
617 ret
= ncsi_xmit_cmd(&nca
);
622 case ncsi_dev_state_suspend_dc
:
623 ndp
->pending_req_num
= 1;
625 nca
.type
= NCSI_PKT_CMD_DC
;
626 nca
.package
= np
->id
;
627 nca
.channel
= nc
->id
;
630 nd
->state
= ncsi_dev_state_suspend_deselect
;
631 ret
= ncsi_xmit_cmd(&nca
);
636 case ncsi_dev_state_suspend_deselect
:
637 ndp
->pending_req_num
= 1;
639 nca
.type
= NCSI_PKT_CMD_DP
;
640 nca
.package
= np
->id
;
641 nca
.channel
= NCSI_RESERVED_CHANNEL
;
643 nd
->state
= ncsi_dev_state_suspend_done
;
644 ret
= ncsi_xmit_cmd(&nca
);
649 case ncsi_dev_state_suspend_done
:
650 spin_lock_irqsave(&nc
->lock
, flags
);
651 nc
->state
= NCSI_CHANNEL_INACTIVE
;
652 spin_unlock_irqrestore(&nc
->lock
, flags
);
653 ncsi_process_next_channel(ndp
);
657 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%x in suspend\n",
663 nd
->state
= ncsi_dev_state_functional
;
666 /* Check the VLAN filter bitmap for a set filter, and construct a
667 * "Set VLAN Filter - Disable" packet if found.
669 static int clear_one_vid(struct ncsi_dev_priv
*ndp
, struct ncsi_channel
*nc
,
670 struct ncsi_cmd_arg
*nca
)
676 index
= ncsi_find_filter(nc
, NCSI_FILTER_VLAN
, NULL
);
678 /* Filter table empty */
682 data
= ncsi_get_filter(nc
, NCSI_FILTER_VLAN
, index
);
684 netdev_err(ndp
->ndev
.dev
,
685 "ncsi: failed to retrieve filter %d\n", index
);
686 /* Set the VLAN id to 0 - this will still disable the entry in
687 * the filter table, but we won't know what it was.
694 netdev_printk(KERN_DEBUG
, ndp
->ndev
.dev
,
695 "ncsi: removed vlan tag %u at index %d\n",
697 ncsi_remove_filter(nc
, NCSI_FILTER_VLAN
, index
);
699 nca
->type
= NCSI_PKT_CMD_SVF
;
701 /* HW filter index starts at 1 */
702 nca
->bytes
[6] = index
+ 1;
703 nca
->bytes
[7] = 0x00;
707 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
710 static int set_one_vid(struct ncsi_dev_priv
*ndp
, struct ncsi_channel
*nc
,
711 struct ncsi_cmd_arg
*nca
)
713 struct vlan_vid
*vlan
= NULL
;
716 list_for_each_entry_rcu(vlan
, &ndp
->vlan_vids
, list
) {
717 index
= ncsi_find_filter(nc
, NCSI_FILTER_VLAN
, &vlan
->vid
);
720 netdev_printk(KERN_DEBUG
, ndp
->ndev
.dev
,
721 "ncsi: new vlan id to set: %u\n",
725 netdev_printk(KERN_DEBUG
, ndp
->ndev
.dev
,
726 "vid %u already at filter pos %d\n",
730 if (!vlan
|| index
>= 0) {
731 netdev_printk(KERN_DEBUG
, ndp
->ndev
.dev
,
732 "no vlan ids left to set\n");
736 index
= ncsi_add_filter(nc
, NCSI_FILTER_VLAN
, &vlan
->vid
);
738 netdev_err(ndp
->ndev
.dev
,
739 "Failed to add new VLAN tag, error %d\n", index
);
740 if (index
== -ENOSPC
)
741 netdev_err(ndp
->ndev
.dev
,
742 "Channel %u already has all VLAN filters set\n",
747 netdev_printk(KERN_DEBUG
, ndp
->ndev
.dev
,
748 "ncsi: set vid %u in packet, index %u\n",
749 vlan
->vid
, index
+ 1);
750 nca
->type
= NCSI_PKT_CMD_SVF
;
751 nca
->words
[1] = vlan
->vid
;
752 /* HW filter index starts at 1 */
753 nca
->bytes
[6] = index
+ 1;
754 nca
->bytes
[7] = 0x01;
759 static void ncsi_configure_channel(struct ncsi_dev_priv
*ndp
)
761 struct ncsi_dev
*nd
= &ndp
->ndev
;
762 struct net_device
*dev
= nd
->dev
;
763 struct ncsi_package
*np
= ndp
->active_package
;
764 struct ncsi_channel
*nc
= ndp
->active_channel
;
765 struct ncsi_channel
*hot_nc
= NULL
;
766 struct ncsi_cmd_arg nca
;
772 nca
.req_flags
= NCSI_REQ_FLAG_EVENT_DRIVEN
;
774 case ncsi_dev_state_config
:
775 case ncsi_dev_state_config_sp
:
776 ndp
->pending_req_num
= 1;
778 /* Select the specific package */
779 nca
.type
= NCSI_PKT_CMD_SP
;
780 if (ndp
->flags
& NCSI_DEV_HWA
)
784 nca
.package
= np
->id
;
785 nca
.channel
= NCSI_RESERVED_CHANNEL
;
786 ret
= ncsi_xmit_cmd(&nca
);
790 nd
->state
= ncsi_dev_state_config_cis
;
792 case ncsi_dev_state_config_cis
:
793 ndp
->pending_req_num
= 1;
795 /* Clear initial state */
796 nca
.type
= NCSI_PKT_CMD_CIS
;
797 nca
.package
= np
->id
;
798 nca
.channel
= nc
->id
;
799 ret
= ncsi_xmit_cmd(&nca
);
803 nd
->state
= ncsi_dev_state_config_clear_vids
;
805 case ncsi_dev_state_config_clear_vids
:
806 case ncsi_dev_state_config_svf
:
807 case ncsi_dev_state_config_ev
:
808 case ncsi_dev_state_config_sma
:
809 case ncsi_dev_state_config_ebf
:
810 #if IS_ENABLED(CONFIG_IPV6)
811 case ncsi_dev_state_config_egmf
:
813 case ncsi_dev_state_config_ecnt
:
814 case ncsi_dev_state_config_ec
:
815 case ncsi_dev_state_config_ae
:
816 case ncsi_dev_state_config_gls
:
817 ndp
->pending_req_num
= 1;
819 nca
.package
= np
->id
;
820 nca
.channel
= nc
->id
;
822 /* Clear any active filters on the channel before setting */
823 if (nd
->state
== ncsi_dev_state_config_clear_vids
) {
824 ret
= clear_one_vid(ndp
, nc
, &nca
);
826 nd
->state
= ncsi_dev_state_config_svf
;
827 schedule_work(&ndp
->work
);
831 nd
->state
= ncsi_dev_state_config_clear_vids
;
832 /* Add known VLAN tags to the filter */
833 } else if (nd
->state
== ncsi_dev_state_config_svf
) {
834 ret
= set_one_vid(ndp
, nc
, &nca
);
836 nd
->state
= ncsi_dev_state_config_ev
;
837 schedule_work(&ndp
->work
);
841 nd
->state
= ncsi_dev_state_config_svf
;
842 /* Enable/Disable the VLAN filter */
843 } else if (nd
->state
== ncsi_dev_state_config_ev
) {
844 if (list_empty(&ndp
->vlan_vids
)) {
845 nca
.type
= NCSI_PKT_CMD_DV
;
847 nca
.type
= NCSI_PKT_CMD_EV
;
848 nca
.bytes
[3] = NCSI_CAP_VLAN_NO
;
850 nd
->state
= ncsi_dev_state_config_sma
;
851 } else if (nd
->state
== ncsi_dev_state_config_sma
) {
852 /* Use first entry in unicast filter table. Note that
853 * the MAC filter table starts from entry 1 instead of
856 nca
.type
= NCSI_PKT_CMD_SMA
;
857 for (index
= 0; index
< 6; index
++)
858 nca
.bytes
[index
] = dev
->dev_addr
[index
];
861 nd
->state
= ncsi_dev_state_config_ebf
;
862 } else if (nd
->state
== ncsi_dev_state_config_ebf
) {
863 nca
.type
= NCSI_PKT_CMD_EBF
;
864 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_BC
].cap
;
865 nd
->state
= ncsi_dev_state_config_ecnt
;
866 #if IS_ENABLED(CONFIG_IPV6)
867 if (ndp
->inet6_addr_num
> 0 &&
868 (nc
->caps
[NCSI_CAP_GENERIC
].cap
&
869 NCSI_CAP_GENERIC_MC
))
870 nd
->state
= ncsi_dev_state_config_egmf
;
872 nd
->state
= ncsi_dev_state_config_ecnt
;
873 } else if (nd
->state
== ncsi_dev_state_config_egmf
) {
874 nca
.type
= NCSI_PKT_CMD_EGMF
;
875 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_MC
].cap
;
876 nd
->state
= ncsi_dev_state_config_ecnt
;
877 #endif /* CONFIG_IPV6 */
878 } else if (nd
->state
== ncsi_dev_state_config_ecnt
) {
879 nca
.type
= NCSI_PKT_CMD_ECNT
;
880 nd
->state
= ncsi_dev_state_config_ec
;
881 } else if (nd
->state
== ncsi_dev_state_config_ec
) {
882 /* Enable AEN if it's supported */
883 nca
.type
= NCSI_PKT_CMD_EC
;
884 nd
->state
= ncsi_dev_state_config_ae
;
885 if (!(nc
->caps
[NCSI_CAP_AEN
].cap
& NCSI_CAP_AEN_MASK
))
886 nd
->state
= ncsi_dev_state_config_gls
;
887 } else if (nd
->state
== ncsi_dev_state_config_ae
) {
888 nca
.type
= NCSI_PKT_CMD_AE
;
890 nca
.dwords
[1] = nc
->caps
[NCSI_CAP_AEN
].cap
;
891 nd
->state
= ncsi_dev_state_config_gls
;
892 } else if (nd
->state
== ncsi_dev_state_config_gls
) {
893 nca
.type
= NCSI_PKT_CMD_GLS
;
894 nd
->state
= ncsi_dev_state_config_done
;
897 ret
= ncsi_xmit_cmd(&nca
);
901 case ncsi_dev_state_config_done
:
902 spin_lock_irqsave(&nc
->lock
, flags
);
903 if (nc
->reconfigure_needed
) {
904 /* This channel's configuration has been updated
905 * part-way during the config state - start the
906 * channel configuration over
908 nc
->reconfigure_needed
= false;
909 nc
->state
= NCSI_CHANNEL_INACTIVE
;
910 spin_unlock_irqrestore(&nc
->lock
, flags
);
912 spin_lock_irqsave(&ndp
->lock
, flags
);
913 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
914 spin_unlock_irqrestore(&ndp
->lock
, flags
);
916 netdev_printk(KERN_DEBUG
, dev
,
917 "Dirty NCSI channel state reset\n");
918 ncsi_process_next_channel(ndp
);
922 if (nc
->modes
[NCSI_MODE_LINK
].data
[2] & 0x1) {
924 nc
->state
= NCSI_CHANNEL_ACTIVE
;
927 nc
->state
= NCSI_CHANNEL_INACTIVE
;
929 spin_unlock_irqrestore(&nc
->lock
, flags
);
931 /* Update the hot channel */
932 spin_lock_irqsave(&ndp
->lock
, flags
);
933 ndp
->hot_channel
= hot_nc
;
934 spin_unlock_irqrestore(&ndp
->lock
, flags
);
936 ncsi_start_channel_monitor(nc
);
937 ncsi_process_next_channel(ndp
);
940 netdev_warn(dev
, "Wrong NCSI state 0x%x in config\n",
947 ncsi_report_link(ndp
, true);
950 static int ncsi_choose_active_channel(struct ncsi_dev_priv
*ndp
)
952 struct ncsi_package
*np
;
953 struct ncsi_channel
*nc
, *found
, *hot_nc
;
954 struct ncsi_channel_mode
*ncm
;
957 spin_lock_irqsave(&ndp
->lock
, flags
);
958 hot_nc
= ndp
->hot_channel
;
959 spin_unlock_irqrestore(&ndp
->lock
, flags
);
961 /* The search is done once an inactive channel with up
965 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
966 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
967 spin_lock_irqsave(&nc
->lock
, flags
);
969 if (!list_empty(&nc
->link
) ||
970 nc
->state
!= NCSI_CHANNEL_INACTIVE
) {
971 spin_unlock_irqrestore(&nc
->lock
, flags
);
981 ncm
= &nc
->modes
[NCSI_MODE_LINK
];
982 if (ncm
->data
[2] & 0x1) {
983 spin_unlock_irqrestore(&nc
->lock
, flags
);
988 spin_unlock_irqrestore(&nc
->lock
, flags
);
993 ncsi_report_link(ndp
, true);
998 spin_lock_irqsave(&ndp
->lock
, flags
);
999 list_add_tail_rcu(&found
->link
, &ndp
->channel_queue
);
1000 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1002 return ncsi_process_next_channel(ndp
);
1005 static bool ncsi_check_hwa(struct ncsi_dev_priv
*ndp
)
1007 struct ncsi_package
*np
;
1008 struct ncsi_channel
*nc
;
1010 bool has_channel
= false;
1012 /* The hardware arbitration is disabled if any one channel
1013 * doesn't support explicitly.
1015 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
1016 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
1019 cap
= nc
->caps
[NCSI_CAP_GENERIC
].cap
;
1020 if (!(cap
& NCSI_CAP_GENERIC_HWA
) ||
1021 (cap
& NCSI_CAP_GENERIC_HWA_MASK
) !=
1022 NCSI_CAP_GENERIC_HWA_SUPPORT
) {
1023 ndp
->flags
&= ~NCSI_DEV_HWA
;
1030 ndp
->flags
|= NCSI_DEV_HWA
;
1034 ndp
->flags
&= ~NCSI_DEV_HWA
;
1038 static int ncsi_enable_hwa(struct ncsi_dev_priv
*ndp
)
1040 struct ncsi_package
*np
;
1041 struct ncsi_channel
*nc
;
1042 unsigned long flags
;
1044 /* Move all available channels to processing queue */
1045 spin_lock_irqsave(&ndp
->lock
, flags
);
1046 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
1047 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
1048 WARN_ON_ONCE(nc
->state
!= NCSI_CHANNEL_INACTIVE
||
1049 !list_empty(&nc
->link
));
1050 ncsi_stop_channel_monitor(nc
);
1051 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
1054 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1056 /* We can have no channels in extremely case */
1057 if (list_empty(&ndp
->channel_queue
)) {
1058 ncsi_report_link(ndp
, false);
1062 return ncsi_process_next_channel(ndp
);
1065 static void ncsi_probe_channel(struct ncsi_dev_priv
*ndp
)
1067 struct ncsi_dev
*nd
= &ndp
->ndev
;
1068 struct ncsi_package
*np
;
1069 struct ncsi_channel
*nc
;
1070 struct ncsi_cmd_arg nca
;
1071 unsigned char index
;
1075 nca
.req_flags
= NCSI_REQ_FLAG_EVENT_DRIVEN
;
1076 switch (nd
->state
) {
1077 case ncsi_dev_state_probe
:
1078 nd
->state
= ncsi_dev_state_probe_deselect
;
1080 case ncsi_dev_state_probe_deselect
:
1081 ndp
->pending_req_num
= 8;
1083 /* Deselect all possible packages */
1084 nca
.type
= NCSI_PKT_CMD_DP
;
1085 nca
.channel
= NCSI_RESERVED_CHANNEL
;
1086 for (index
= 0; index
< 8; index
++) {
1087 nca
.package
= index
;
1088 ret
= ncsi_xmit_cmd(&nca
);
1093 nd
->state
= ncsi_dev_state_probe_package
;
1095 case ncsi_dev_state_probe_package
:
1096 ndp
->pending_req_num
= 16;
1098 /* Select all possible packages */
1099 nca
.type
= NCSI_PKT_CMD_SP
;
1101 nca
.channel
= NCSI_RESERVED_CHANNEL
;
1102 for (index
= 0; index
< 8; index
++) {
1103 nca
.package
= index
;
1104 ret
= ncsi_xmit_cmd(&nca
);
1109 /* Disable all possible packages */
1110 nca
.type
= NCSI_PKT_CMD_DP
;
1111 for (index
= 0; index
< 8; index
++) {
1112 nca
.package
= index
;
1113 ret
= ncsi_xmit_cmd(&nca
);
1118 nd
->state
= ncsi_dev_state_probe_channel
;
1120 case ncsi_dev_state_probe_channel
:
1121 if (!ndp
->active_package
)
1122 ndp
->active_package
= list_first_or_null_rcu(
1123 &ndp
->packages
, struct ncsi_package
, node
);
1124 else if (list_is_last(&ndp
->active_package
->node
,
1126 ndp
->active_package
= NULL
;
1128 ndp
->active_package
= list_next_entry(
1129 ndp
->active_package
, node
);
1131 /* All available packages and channels are enumerated. The
1132 * enumeration happens for once when the NCSI interface is
1133 * started. So we need continue to start the interface after
1136 * We have to choose an active channel before configuring it.
1137 * Note that we possibly don't have active channel in extreme
1140 if (!ndp
->active_package
) {
1141 ndp
->flags
|= NCSI_DEV_PROBED
;
1142 if (ncsi_check_hwa(ndp
))
1143 ncsi_enable_hwa(ndp
);
1145 ncsi_choose_active_channel(ndp
);
1149 /* Select the active package */
1150 ndp
->pending_req_num
= 1;
1151 nca
.type
= NCSI_PKT_CMD_SP
;
1153 nca
.package
= ndp
->active_package
->id
;
1154 nca
.channel
= NCSI_RESERVED_CHANNEL
;
1155 ret
= ncsi_xmit_cmd(&nca
);
1159 nd
->state
= ncsi_dev_state_probe_cis
;
1161 case ncsi_dev_state_probe_cis
:
1162 ndp
->pending_req_num
= NCSI_RESERVED_CHANNEL
;
1164 /* Clear initial state */
1165 nca
.type
= NCSI_PKT_CMD_CIS
;
1166 nca
.package
= ndp
->active_package
->id
;
1167 for (index
= 0; index
< NCSI_RESERVED_CHANNEL
; index
++) {
1168 nca
.channel
= index
;
1169 ret
= ncsi_xmit_cmd(&nca
);
1174 nd
->state
= ncsi_dev_state_probe_gvi
;
1176 case ncsi_dev_state_probe_gvi
:
1177 case ncsi_dev_state_probe_gc
:
1178 case ncsi_dev_state_probe_gls
:
1179 np
= ndp
->active_package
;
1180 ndp
->pending_req_num
= np
->channel_num
;
1182 /* Retrieve version, capability or link status */
1183 if (nd
->state
== ncsi_dev_state_probe_gvi
)
1184 nca
.type
= NCSI_PKT_CMD_GVI
;
1185 else if (nd
->state
== ncsi_dev_state_probe_gc
)
1186 nca
.type
= NCSI_PKT_CMD_GC
;
1188 nca
.type
= NCSI_PKT_CMD_GLS
;
1190 nca
.package
= np
->id
;
1191 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
1192 nca
.channel
= nc
->id
;
1193 ret
= ncsi_xmit_cmd(&nca
);
1198 if (nd
->state
== ncsi_dev_state_probe_gvi
)
1199 nd
->state
= ncsi_dev_state_probe_gc
;
1200 else if (nd
->state
== ncsi_dev_state_probe_gc
)
1201 nd
->state
= ncsi_dev_state_probe_gls
;
1203 nd
->state
= ncsi_dev_state_probe_dp
;
1205 case ncsi_dev_state_probe_dp
:
1206 ndp
->pending_req_num
= 1;
1208 /* Deselect the active package */
1209 nca
.type
= NCSI_PKT_CMD_DP
;
1210 nca
.package
= ndp
->active_package
->id
;
1211 nca
.channel
= NCSI_RESERVED_CHANNEL
;
1212 ret
= ncsi_xmit_cmd(&nca
);
1216 /* Scan channels in next package */
1217 nd
->state
= ncsi_dev_state_probe_channel
;
1220 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%0x in enumeration\n",
1226 ncsi_report_link(ndp
, true);
1229 static void ncsi_dev_work(struct work_struct
*work
)
1231 struct ncsi_dev_priv
*ndp
= container_of(work
,
1232 struct ncsi_dev_priv
, work
);
1233 struct ncsi_dev
*nd
= &ndp
->ndev
;
1235 switch (nd
->state
& ncsi_dev_state_major
) {
1236 case ncsi_dev_state_probe
:
1237 ncsi_probe_channel(ndp
);
1239 case ncsi_dev_state_suspend
:
1240 ncsi_suspend_channel(ndp
);
1242 case ncsi_dev_state_config
:
1243 ncsi_configure_channel(ndp
);
1246 netdev_warn(nd
->dev
, "Wrong NCSI state 0x%x in workqueue\n",
1251 int ncsi_process_next_channel(struct ncsi_dev_priv
*ndp
)
1253 struct ncsi_channel
*nc
;
1255 unsigned long flags
;
1257 spin_lock_irqsave(&ndp
->lock
, flags
);
1258 nc
= list_first_or_null_rcu(&ndp
->channel_queue
,
1259 struct ncsi_channel
, link
);
1261 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1265 list_del_init(&nc
->link
);
1266 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1268 spin_lock_irqsave(&nc
->lock
, flags
);
1269 old_state
= nc
->state
;
1270 nc
->state
= NCSI_CHANNEL_INVISIBLE
;
1271 spin_unlock_irqrestore(&nc
->lock
, flags
);
1273 ndp
->active_channel
= nc
;
1274 ndp
->active_package
= nc
->package
;
1276 switch (old_state
) {
1277 case NCSI_CHANNEL_INACTIVE
:
1278 ndp
->ndev
.state
= ncsi_dev_state_config
;
1279 ncsi_configure_channel(ndp
);
1281 case NCSI_CHANNEL_ACTIVE
:
1282 ndp
->ndev
.state
= ncsi_dev_state_suspend
;
1283 ncsi_suspend_channel(ndp
);
1286 netdev_err(ndp
->ndev
.dev
, "Invalid state 0x%x on %d:%d\n",
1287 old_state
, nc
->package
->id
, nc
->id
);
1288 ncsi_report_link(ndp
, false);
1295 ndp
->active_channel
= NULL
;
1296 ndp
->active_package
= NULL
;
1297 if (ndp
->flags
& NCSI_DEV_RESHUFFLE
) {
1298 ndp
->flags
&= ~NCSI_DEV_RESHUFFLE
;
1299 return ncsi_choose_active_channel(ndp
);
1302 ncsi_report_link(ndp
, false);
1306 #if IS_ENABLED(CONFIG_IPV6)
1307 static int ncsi_inet6addr_event(struct notifier_block
*this,
1308 unsigned long event
, void *data
)
1310 struct inet6_ifaddr
*ifa
= data
;
1311 struct net_device
*dev
= ifa
->idev
->dev
;
1312 struct ncsi_dev
*nd
= ncsi_find_dev(dev
);
1313 struct ncsi_dev_priv
*ndp
= nd
? TO_NCSI_DEV_PRIV(nd
) : NULL
;
1314 struct ncsi_package
*np
;
1315 struct ncsi_channel
*nc
;
1316 struct ncsi_cmd_arg nca
;
1320 if (!ndp
|| (ipv6_addr_type(&ifa
->addr
) &
1321 (IPV6_ADDR_LINKLOCAL
| IPV6_ADDR_LOOPBACK
)))
1326 action
= (++ndp
->inet6_addr_num
) == 1;
1327 nca
.type
= NCSI_PKT_CMD_EGMF
;
1330 action
= (--ndp
->inet6_addr_num
== 0);
1331 nca
.type
= NCSI_PKT_CMD_DGMF
;
1337 /* We might not have active channel or packages. The IPv6
1338 * required multicast will be enabled when active channel
1339 * or packages are chosen.
1341 np
= ndp
->active_package
;
1342 nc
= ndp
->active_channel
;
1343 if (!action
|| !np
|| !nc
)
1346 /* We needn't enable or disable it if the function isn't supported */
1347 if (!(nc
->caps
[NCSI_CAP_GENERIC
].cap
& NCSI_CAP_GENERIC_MC
))
1352 nca
.package
= np
->id
;
1353 nca
.channel
= nc
->id
;
1354 nca
.dwords
[0] = nc
->caps
[NCSI_CAP_MC
].cap
;
1355 ret
= ncsi_xmit_cmd(&nca
);
1357 netdev_warn(dev
, "Fail to %s global multicast filter (%d)\n",
1358 (event
== NETDEV_UP
) ? "enable" : "disable", ret
);
1365 static struct notifier_block ncsi_inet6addr_notifier
= {
1366 .notifier_call
= ncsi_inet6addr_event
,
1368 #endif /* CONFIG_IPV6 */
1370 static int ncsi_kick_channels(struct ncsi_dev_priv
*ndp
)
1372 struct ncsi_dev
*nd
= &ndp
->ndev
;
1373 struct ncsi_channel
*nc
;
1374 struct ncsi_package
*np
;
1375 unsigned long flags
;
1378 NCSI_FOR_EACH_PACKAGE(ndp
, np
) {
1379 NCSI_FOR_EACH_CHANNEL(np
, nc
) {
1380 spin_lock_irqsave(&nc
->lock
, flags
);
1382 /* Channels may be busy, mark dirty instead of
1384 * a) not ACTIVE (configured)
1385 * b) in the channel_queue (to be configured)
1386 * c) it's ndev is in the config state
1388 if (nc
->state
!= NCSI_CHANNEL_ACTIVE
) {
1389 if ((ndp
->ndev
.state
& 0xff00) ==
1390 ncsi_dev_state_config
||
1391 !list_empty(&nc
->link
)) {
1392 netdev_printk(KERN_DEBUG
, nd
->dev
,
1393 "ncsi: channel %p marked dirty\n",
1395 nc
->reconfigure_needed
= true;
1397 spin_unlock_irqrestore(&nc
->lock
, flags
);
1401 spin_unlock_irqrestore(&nc
->lock
, flags
);
1403 ncsi_stop_channel_monitor(nc
);
1404 spin_lock_irqsave(&nc
->lock
, flags
);
1405 nc
->state
= NCSI_CHANNEL_INACTIVE
;
1406 spin_unlock_irqrestore(&nc
->lock
, flags
);
1408 spin_lock_irqsave(&ndp
->lock
, flags
);
1409 list_add_tail_rcu(&nc
->link
, &ndp
->channel_queue
);
1410 spin_unlock_irqrestore(&ndp
->lock
, flags
);
1412 netdev_printk(KERN_DEBUG
, nd
->dev
,
1413 "ncsi: kicked channel %p\n", nc
);
/* ndo_vlan_rx_add_vid hook: record a VLAN id so the NCSI controller's
 * VLAN filter can be programmed with it.
 *
 * The id is appended to ndp->vlan_vids (bounded by NCSI_MAX_VLAN_VIDS,
 * the hardware filter depth) and every channel is kicked so the new
 * filter configuration is pushed out.
 *
 * Returns 0 on success or if there is nothing to do (vid 0, unknown
 * device, duplicate vid); -ENOSPC when the filter table is full;
 * -ENOMEM on allocation failure.
 */
int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ncsi_dev_priv *ndp;
	unsigned int n_vids = 0;
	struct vlan_vid *vlan;
	struct ncsi_dev *nd;
	bool found = false;

	/* vid 0 is the "no tag" priority-tagged case; nothing to filter */
	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "ncsi: No net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Add the VLAN id to our internal list */
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		n_vids++;
		if (vlan->vid == vid) {
			netdev_printk(KERN_DEBUG, dev,
				      "vid %u already registered\n", vid);
			return 0;
		}
	}
	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
		netdev_warn(dev,
			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
			    vid, NCSI_MAX_VLAN_VIDS);
		return -ENOSPC;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->proto = proto;
	vlan->vid = vid;
	list_add_rcu(&vlan->list, &ndp->vlan_vids);

	netdev_printk(KERN_DEBUG, dev, "Added new vid %u\n", vid);

	/* Only run the state machine if a channel was actually kicked */
	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
/* ndo_vlan_rx_kill_vid hook: forget a VLAN id and reprogram the
 * controller's VLAN filter without it.
 *
 * Removes the matching entry (or entries) from ndp->vlan_vids and kicks
 * the channels so the updated filter set is applied.
 *
 * Returns 0 on success or when there is nothing to do (vid 0, unknown
 * device); -EINVAL if the vid was never registered.
 */
int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	/* vid 0 is never registered by the add path; nothing to remove */
	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "ncsi: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_printk(KERN_DEBUG, dev,
				      "vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "ncsi: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	/* Only run the state machine if a channel was actually kicked */
	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
/* Register @dev as an NCSI-managed interface.
 *
 * Allocates and initializes the private NCSI state (request table,
 * per-device lists and lock), links it onto the global ncsi_dev_list,
 * and installs the NCSI packet type handler so responses are received.
 * @handler is called back on link/configuration events.
 *
 * Idempotent: if @dev is already registered the existing ncsi_dev is
 * returned. Returns NULL on allocation failure.
 */
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		/* Per-request timeout timer; fires ncsi_request_timeout
		 * when a command goes unanswered.
		 */
		setup_timer(&ndp->requests[i].timer,
			    ncsi_request_timeout,
			    (unsigned long)&ndp->requests[i]);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
#if IS_ENABLED(CONFIG_IPV6)
	ndp->inet6_addr_num = 0;
	/* First NCSI device: start watching IPv6 address changes so the
	 * multicast filter can track them.
	 */
	if (list_empty(&ncsi_dev_list))
		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);
1569 int ncsi_start_dev(struct ncsi_dev
*nd
)
1571 struct ncsi_dev_priv
*ndp
= TO_NCSI_DEV_PRIV(nd
);
1574 if (nd
->state
!= ncsi_dev_state_registered
&&
1575 nd
->state
!= ncsi_dev_state_functional
)
1578 if (!(ndp
->flags
& NCSI_DEV_PROBED
)) {
1579 nd
->state
= ncsi_dev_state_probe
;
1580 schedule_work(&ndp
->work
);
1584 if (ndp
->flags
& NCSI_DEV_HWA
)
1585 ret
= ncsi_enable_hwa(ndp
);
1587 ret
= ncsi_choose_active_channel(ndp
);
1591 EXPORT_SYMBOL_GPL(ncsi_start_dev
);
/* Bring an NCSI device down.
 *
 * Stops every channel's monitor, forces every channel to
 * NCSI_CHANNEL_INACTIVE, and reports the link as down.  Warns once if a
 * channel was still queued for configuration or mid-transition
 * (INVISIBLE) at stop time, since that indicates the state machine was
 * still running.
 */
void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor and reset channel's state */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			/* Monitor stopped before the state flips so it
			 * cannot fire against an inactive channel.
			 */
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			/* Snapshot under the lock for the sanity check below */
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	/* force_down=true: report link down regardless of channel state */
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);
/* Tear down an NCSI device registered by ncsi_register_dev().
 *
 * Removes the packet type handler first so no new responses arrive,
 * frees every discovered package (and its channels), unlinks the device
 * from the global list under ncsi_dev_lock, and — if it was the last
 * NCSI device — drops the IPv6 address notifier registered at
 * first-device time. Finally releases the private state.
 */
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
#if IS_ENABLED(CONFIG_IPV6)
	/* Last device gone: stop tracking IPv6 address changes */
	if (list_empty(&ncsi_dev_list))
		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);