/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>
typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;
#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};
struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};
struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};
struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};
struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};
struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};
/**
 * struct macsec_rx_sa - receive secure association
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	refcount_t refcnt;
	struct rcu_head rcu_head;
};
/**
 * struct macsec_tx_sa - transmit secure association
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_tx_sc - transmit secure channel
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};
/**
 * struct macsec_dev - private data
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};
static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}
static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}
static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}
static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}
static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}
static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}
static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}
static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
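
/* An SCI is the 8-byte concatenation of the station's MAC address (6 bytes)
 * and a 2-byte port identifier; make_sci() below builds it in wire order.
 */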
static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;

	return len >= extra_len + MIN_NON_SHORT_LEN;
}
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
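
/* The GCM nonce used by this cipher suite is GCM_AES_IV_LEN (12) bytes: the
 * 8-byte SCI followed by the 4-byte packet number in network byte order,
 * matching the layout of struct gcm_iv above.
 */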
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
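
/* Once the 32-bit packet number wraps to zero, the TX SA is disabled and,
 * if protect_frames is set, the SecY is marked non-operational until
 * userspace installs or activates a fresh SA (see macsec_add_txsa() and
 * macsec_upd_txsa() below).
 */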
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}
static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}
static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}
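
/* macsec_alloc_req() packs the AEAD request, the IV and the scatterlist
 * array into a single GFP_ATOMIC allocation, so the hot TX/RX paths pay for
 * only one allocation per packet.
 */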
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
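
/* macsec_post_decrypt() re-runs the replay-window check of IEEE 802.1AE-2006
 * figure 10-5 after the AEAD operation has completed (the SA's next_pn may
 * have advanced since the pre-decrypt check) and updates the per-SC and
 * per-SA receive counters accordingly.
 */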
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}
static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

	rcu_read_unlock();
}
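
/* macsec_handle_frame() is the rx_handler installed on the real device:
 * frames without a MACsec EtherType are passed to the uncontrolled port
 * (and optionally cloned to each SecY by handle_not_macsec()), while tagged
 * frames are matched to a receive SC/SA, decrypted and validated before
 * being delivered to the MACsec netdevice, i.e. the controlled port.
 */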
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	int ret;
	bool pulled_sci;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}
static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}
static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}
static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					   struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}
static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;
	*scp = tx_sc;

	return tx_sa;
}
static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;

	return rx_sa;
}
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
			     attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
			     attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}
	free_rx_sc(rx_sc);

	rtnl_unlock();

	return 0;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}
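
/* The copy_*_stats() helpers below fold the per-CPU counters into a single
 * sum for netlink; the 64-bit SC and SecY counters are snapshotted under
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() so that 32-bit
 * readers see consistent values.
 */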
static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}
static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= macsec_genl_ops,
	.n_ops		= ARRAY_SIZE(macsec_genl_ops),
};
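/* Illustrative only: the usual consumer of this generic netlink family is
 * iproute2's "ip macsec" command; SA number, packet number and key id below
 * are arbitrary example values:
 *
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte-hex-key>
 *   ip macsec add macsec0 rx port 1 address 52:54:00:12:34:56
 *   ip macsec show
 *
 * which map to MACSEC_CMD_ADD_TXSA, MACSEC_CMD_ADD_RXSC and
 * MACSEC_CMD_GET_TXSC respectively.
 */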
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}
#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}
static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}
static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}
static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}
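/* MTU accounting for macsec_change_mtu() below: macsec_extra_len(true)
 * covers the SecTAG including its EtherType (2 + 6 + 8 bytes with the SCI
 * present), so with the default 16-byte ICV the per-packet overhead is
 * 32 bytes; e.g. a 1500-byte lower device allows a MACsec MTU of at most
 * 1468.
 */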
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}
static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}
static int macsec_get_nest_level(struct net_device *dev)
{
	return macsec_priv(dev)->nest_level;
}
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
	.ndo_get_lock_subclass	= macsec_get_nest_level,
};
static const struct device_type macsec_type = {
	.name = "macsec",
};
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};
static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
}
static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}
static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}
static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}
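/* The 64-bit SCI is simply the device MAC address followed by a 16-bit port
 * number; e.g. MAC 52:54:00:12:34:56 with the default end-station port
 * (MACSEC_PORT_ES, port 1) yields SCI 0x5254001234560001 on the wire.
 */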
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	dev_hold(real_dev);

	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macsec_netdev_addr_lock_key,
				       macsec_get_nest_level(dev));

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
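/* macsec_validate_attr() below sanity-checks the rtnetlink attributes before
 * a device is created; note the probe for non-default ICV lengths, which
 * simply test-allocates a GCM(AES) transform with the requested ICV size and
 * frees it again.
 */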
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}
static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
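/* Illustrative only: a macsec device is created over a lower device through
 * these rtnl_link_ops, typically with iproute2:
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *
 * IFLA_MACSEC_* attributes not supplied at creation time keep the defaults
 * set up in macsec_add_dev().
 */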
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	}
	}

	return NOTIFY_OK;
}
static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}
module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");