/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
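/* Worked example of the TCI/AN octet above (illustration only): a frame
 * sent with encryption enabled, the SCI included, and encoding_sa = 0
 * carries tci_an = MACSEC_TCI_SC | MACSEC_TCI_CONFID = 0x2c; the low two
 * bits (MACSEC_AN_MASK) select which of the MACSEC_NUM_AN secure
 * associations protected the frame.
 */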
#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))
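/* Usage sketch for the iterators above: the _bh variant walks the RX SC
 * list from the RX path under RCU-bh protection (as in find_rx_sc()), the
 * _rtnl variant under the RTNL lock (as in find_rx_sc_rtnl()); e.g.:
 *
 *	struct macsec_rx_sc *sc;
 *
 *	for_each_rxsc(secy, sc)
 *		if (sc->sci == sci)
 *			return sc;
 */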
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};
struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};
/**
 * struct macsec_rx_sa - receive secure association
 * @active:
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active:
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active:
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station:
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};
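/* Object model, as laid out by the structures above: the macsec netdevice's
 * private data (struct macsec_dev) embeds one SecY; the SecY owns one TX SC
 * and an RCU list of RX SCs; each SC holds up to MACSEC_NUM_AN SAs.  All
 * macsec devices stacked on one real device are linked together through
 * struct macsec_rxh_data, which is installed as the rx_handler_data of the
 * underlying netdevice.
 */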
static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
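/* For example, with the convention above, an SCI for a station whose MAC
 * address is 52:54:00:12:34:56 using the default end-station port number
 * (MACSEC_PORT_ES, i.e. port 1) is the 8-byte string
 * 52:54:00:12:34:56:00:01 in wire order.
 */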
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
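/* Worked numbers for the helpers above: without the SCI, the SecTAG is
 * MACSEC_TAG_LEN = 6 octets, so macsec_extra_len(false) = 6 + 2 = 8 and
 * macsec_hdr_len(false) = 20; with the SCI, macsec_extra_len(true) =
 * 6 + 8 + 2 = 16 and macsec_hdr_len(true) = 28.  The additional
 * sizeof(__be16) accounts for the MACsec EtherType that the SecTAG
 * inserts in front of the original one.
 */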
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}
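/* The 12-byte GCM IV built above is the concatenation of the 8-byte SCI
 * and the 4-byte big-endian packet number, matching GCM_AES_IV_LEN; since
 * the PN is strictly increasing per SA, each (key, IV) pair is used only
 * once, as AES-GCM requires.
 */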
static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
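/* Note on the wrap handling above: with a 32-bit PN and AES-GCM, a PN may
 * never be reused under the same key, so when next_pn wraps to 0 the SA is
 * taken out of service and, if frames must be protected, the whole SecY
 * stops being operational until a fresh SA is configured.
 */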
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}
static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}
static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);

	req = tmp;
	aead_request_set_tfm(req, tfm);

	return req;
}
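/* Layout of the single allocation made above: the aead_request (including
 * the transform's request context) comes first, then the GCM IV, then a
 * scatterlist sized for num_frags entries, with the scatterlist placed at
 * its natural alignment.  One kmalloc() per packet instead of three.
 */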
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}
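/* Replay window example for the check above: with replay_window = 32 and
 * next_pn = 1000, lowest_pn = 968, so PNs of 968 and above are still
 * acceptable, while anything below 968 is counted as InPktsLate and
 * dropped when replay_protect is enabled.
 */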
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}
static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	int ret;
	bool pulled_sci;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}
static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}
static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}
static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;
	*scp = tx_sc;

	return tx_sa;
}
static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;

	return rx_sa;
}
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};
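/* The genetlink attributes above are what iproute2's "ip macsec" command
 * fills in; for illustration (userspace syntax, roughly, not part of this
 * file), configuring a TX SA and a peer RX SC/SA looks like:
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte hex key>
 *	ip macsec add macsec0 rx sci <8-byte hex sci>
 *	ip macsec add macsec0 rx sci <8-byte hex sci> sa 0 pn 1 on \
 *		key 02 <16-byte hex key>
 */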
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}
static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}
static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
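/* The per-SA stats above are 32-bit and incremented with this_cpu_inc(),
 * so they can be summed without a seqcount; the per-SC, SecY and device
 * stats below are 64-bit and are read under u64_stats_fetch_begin_irq()/
 * u64_stats_fetch_retry_irq() to get a consistent snapshot on 32-bit
 * machines.
 */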
static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &macsec_fam);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int macsec_generation = 1; /* protected by RTNL */
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
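
/* Genetlink command table: MACSEC_CMD_GET_TXSC is dump-only; all
 * commands that modify state use doit handlers and require
 * CAP_NET_ADMIN (GENL_ADMIN_PERM).
 */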
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_family macsec_fam __ro_after_init = {
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= macsec_genl_ops,
	.n_ops		= ARRAY_SIZE(macsec_genl_ops),
};
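
/* Transmit path. If frame protection is disabled, count the frame as
 * OutPktsUntagged and hand it to the underlying device unchanged. If
 * the SecY is not operational, drop it. Otherwise encrypt and
 * transmit; macsec_encrypt() returns ERR_PTR(-EINPROGRESS) when the
 * crypto request completes asynchronously, in which case the
 * completion callback finishes the transmission later.
 */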
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}
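
/* Features that may be inherited from the underlying device. Checksum
 * and hardware segmentation offloads are not in this set: the ICV
 * computation and encryption happen in software before the frame
 * reaches the lower device.
 */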
#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA)
static struct lock_class_key macsec_netdev_addr_lock_key;
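
/* ndo_init: allocate per-cpu tstats and GRO cells, inherit features
 * from the real device, and reserve enough head/tailroom for the
 * SecTAG and ICV.
 */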
static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}
static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
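
/* Opening the macsec device registers its unicast address with the
 * lower device and mirrors allmulti/promisc, unwinding each step in
 * reverse order on failure.
 */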
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}
static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}
static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}
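
/* The upper MTU bound follows from the encapsulation overhead: with
 * the standard 16-byte ICV and an included SCI, macsec_extra_len(true)
 * is 16 bytes (EtherType + TCI/AN + SL + PN + SCI), so on a 1500-byte
 * parent a macsec device can carry at most 1500 - 16 - 16 = 1468
 * bytes.
 */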
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
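
/* Fold the per-cpu tstats counters into a single rtnl_link_stats64,
 * re-reading each snapshot via the u64_stats sequence counter so that
 * 64-bit values are consistent even on 32-bit machines.
 */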
static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return s;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;

	return s;
}
static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}
static int macsec_get_nest_level(struct net_device *dev)
{
	return macsec_priv(dev)->nest_level;
}
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
	.ndo_get_lock_subclass	= macsec_get_nest_level,
};
static const struct device_type macsec_type = {
	.name = "macsec",
};
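
/* Policy for the IFLA_MACSEC_* attributes accepted at link creation
 * and changelink time; per-SA and per-RXSC configuration is handled
 * through the genetlink interface above instead.
 */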
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};
static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
	free_netdev(dev);
}
static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}
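
/* Apply the IFLA_MACSEC_* attributes that may change over the
 * device's lifetime. Selecting a new encoding SA also re-evaluates
 * whether the SecY is operational, based on whether that SA exists
 * and is active.
 */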
static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}
static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
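
/* An SCI identifies a secure channel: per IEEE 802.1AE, it is the
 * 48-bit MAC address of the transmitting station followed by a 16-bit
 * port number.
 */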
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	dev_hold(real_dev);

	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macsec_netdev_addr_lock_key,
				       macsec_get_nest_level(dev));

	err = netdev_upper_dev_link(real_dev, dev);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
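
/* For illustration, devices following the defaults above can be
 * created with iproute2, e.g.:
 *
 *   ip link add link eth0 macsec0 type macsec
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *
 * where the second form picks an explicit port for the SCI and turns
 * on encryption (rather than integrity-only protection).
 */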
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}
static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
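
/* A device is a "macsec master" (has macsec devices stacked on top of
 * it) if and only if its rx_handler is macsec_handle_frame.
 */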
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
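
/* React to events on the underlying device: on NETDEV_UNREGISTER,
 * delete all macsec devices stacked on it; on NETDEV_CHANGEMTU, clamp
 * each macsec device's MTU to the new parent MTU minus the MACsec
 * overhead.
 */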
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}
static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}
module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");