/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
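
/* For reference, the SecTAG as it appears on the wire (IEEE 802.1AE-2006
 * clause 9): the MACsec EtherType (0x88E5), one TCI/AN octet carrying the
 * V, ES, SC, SCB, E and C bits plus the 2-bit association number, one
 * short-length octet, a 4-byte packet number, and an optional 8-byte SCI
 * when the SC bit is set. MACSEC_TAG_LEN above counts everything after the
 * EtherType except that optional SCI.
 */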

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
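
/* The 12-byte GCM nonce is simply the 8-byte SCI as it appears on the wire
 * followed by the 4-byte packet number in network byte order, which is why
 * GCM_AES_IV_LEN is 12 and why macsec_fill_iv() below only needs the SCI
 * and the PN.
 */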

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	refcount_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
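
/* An SCI is the 6-byte MAC address of the transmitting station followed by
 * a 2-byte port identifier, so the full 8 bytes can be copied straight into
 * (or out of) the optional secure_channel_id field of the SecTAG.
 */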

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
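
/* The short-length field is only meaningful for frames whose secure data is
 * shorter than MIN_NON_SHORT_LEN (48) octets; longer frames keep SL == 0,
 * which is what the length checks in macsec_validate_skb() rely on.
 */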

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
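
/* Worst-case room needed around the payload: headroom for a full SecTAG
 * including the optional SCI, tailroom for the standard 16-byte ICV that
 * GCM-AES appends. macsec_encrypt() reallocates the skb when either is
 * missing.
 */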

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
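
/* Once the 32-bit packet number space of a TX SA is exhausted, the SA is
 * taken out of service and, when protection is enabled, the whole SecY stops
 * being operational until a fresh SA (and key) is installed via netlink.
 */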

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
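
/* Note on the AEAD layout used above: with encryption enabled, the Ethernet
 * addresses plus the SecTAG form the associated data and the payload is
 * encrypted in place; in authenticate-only mode nothing is encrypted and the
 * whole frame (minus the ICV) is passed as associated data instead.
 */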

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

	rcu_read_unlock();
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	int ret;
	bool pulled_sci;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	rcu_read_unlock();
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}
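
/* All SAs use the gcm(aes) AEAD: a 16-byte SAK gives GCM-AES-128 and a
 * 32-byte SAK gives GCM-AES-256, with icv_len as the authentication tag
 * length requested from the crypto layer.
 */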

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*secyp = secy;
	*scp = tx_sc;

	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();
	return 0;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sa_stats(struct sk_buff *skb,
		 struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK         += stats->InPktsOK;
		sum.InPktsInvalid    += stats->InPktsInvalid;
		sum.InPktsNotValid   += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA   += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked   += tmp.InPktsUnchecked;
		sum.InPktsDelayed     += tmp.InPktsDelayed;
		sum.InPktsOK          += tmp.InPktsOK;
		sum.InPktsInvalid     += tmp.InPktsInvalid;
		sum.InPktsLate        += tmp.InPktsLate;
		sum.InPktsNotValid    += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA    += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected   += tmp.OutPktsProtected;
		sum.OutPktsEncrypted   += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged  += tmp.OutPktsUntagged;
		sum.InPktsUntagged   += tmp.InPktsUntagged;
		sum.OutPktsTooLong   += tmp.OutPktsTooLong;
		sum.InPktsNoTag      += tmp.InPktsNoTag;
		sum.InPktsBadTag     += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI      += tmp.InPktsNoSCI;
		sum.InPktsOverrun    += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}

static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int macsec_generation = 1; /* protected by RTNL */

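/* Dump callback for MACSEC_CMD_GET_TXSC: walk all netdevices in the
 * namespace, skip non-MACsec ones, and emit one message per SecY.
 * cb->args[0] carries the resume position across dump rounds.
 */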
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

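/* Generic netlink command table: GET_TXSC is served by the dump
 * handler above; all SC/SA add, delete and update commands are doit
 * handlers restricted by GENL_ADMIN_PERM.
 */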
static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};

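/* Transmit path: pass frames straight to the lower device when
 * protection is off, drop them when the SecY is not operational,
 * otherwise run them through macsec_encrypt(), which may complete
 * asynchronously (-EINPROGRESS).
 */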
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static struct lock_class_key macsec_netdev_addr_lock_key;

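/* ndo_init: allocate per-CPU stats and GRO cells, inherit features,
 * headroom/tailroom and (if unset) MAC/broadcast addresses from the
 * underlying device.
 */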
static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

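/* Fold the per-CPU software counters into the rtnl_link_stats64
 * snapshot; dropped-packet counts come from the regular netdev stats.
 */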
static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static int macsec_get_nest_level(struct net_device *dev)
{
	return macsec_priv(dev)->nest_level;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
	.ndo_get_lock_subclass = macsec_get_nest_level,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

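/* Apply the IFLA_MACSEC_* attributes shared by newlink and changelink:
 * encoding SA, replay window, per-SecY flags and the cipher suite
 * (which only selects the SAK length here).
 */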
static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	return macsec_changelink_common(dev, data);
}

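/* Tear down a SecY: unlink and free every receive SC, then clear and
 * free all transmit SAs.
 */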
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

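/* The first MACsec device on a lower device allocates the rx-handler
 * data and registers macsec_handle_frame(); every SecY is then linked
 * into rxd->secys.
 */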
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

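/* rtnl newlink handler: bind the device to its lower device, register
 * it, derive the SCI (explicit attribute, port number, or the default
 * end-station port combined with the MAC address), reject duplicate
 * SCIs on the same lower device and apply any changelink attributes.
 */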
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	dev_hold(real_dev);

	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macsec_netdev_addr_lock_key,
				       macsec_get_nest_level(dev));

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	err = register_macsec_dev(real_dev, dev);
	if (err)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

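/* Validate IFLA_MACSEC_* attributes before link creation: check the
 * ICV length against the selected cipher, range-check the encoding SA
 * and boolean flags, reject invalid ES/SCB/INC_SCI combinations and
 * require a window when replay protection is requested.
 */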
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};

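/* Notifier on the underlying device: propagate operstate changes to
 * the stacked MACsec devices, remove them all when it unregisters and
 * shrink their MTU when its own MTU drops.
 */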
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

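/* Module init/exit: register the netdev notifier, the rtnl link ops
 * and the generic netlink family, unwinding in reverse order on
 * failure.
 */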
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");