]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/macsec.c
macsec: Fix invalid error code set
[mirror_ubuntu-jammy-kernel.git] / drivers / net / macsec.c
CommitLineData
2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
c09440f7
SD
2/*
3 * drivers/net/macsec.c - MACsec device
4 *
5 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
c09440f7
SD
6 */
7
8#include <linux/types.h>
9#include <linux/skbuff.h>
10#include <linux/socket.h>
11#include <linux/module.h>
12#include <crypto/aead.h>
13#include <linux/etherdevice.h>
3cf3227a 14#include <linux/netdevice.h>
c09440f7 15#include <linux/rtnetlink.h>
e187246f 16#include <linux/refcount.h>
c09440f7
SD
17#include <net/genetlink.h>
18#include <net/sock.h>
5491e7c6 19#include <net/gro_cells.h>
c0e4eadf 20#include <net/macsec.h>
3cf3227a 21#include <linux/phy.h>
a21ecf0e 22#include <linux/byteorder/generic.h>
b06d072c 23#include <linux/if_arp.h>
c09440f7
SD
24
25#include <uapi/linux/if_macsec.h>
26
c09440f7
SD
27#define MACSEC_SCI_LEN 8
28
29/* SecTAG length = macsec_eth_header without the optional SCI */
30#define MACSEC_TAG_LEN 6
31
/* On-the-wire MACsec header: Ethernet header immediately followed by the
 * SecTAG (IEEE 802.1AE 9.3).  Overlaid directly on frame data, so it must
 * remain __packed and field order must not change.
 */
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;	/* TCI flag bits (V/ES/SC/SCB/E/C) + association number */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,	/* SL: secure data length when < 48 octets */
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;	/* lower 32 bits of the PN, network order */
	u8 secure_channel_id[8]; /* optional */
} __packed;
48
49#define MACSEC_TCI_VERSION 0x80
50#define MACSEC_TCI_ES 0x40 /* end station */
51#define MACSEC_TCI_SC 0x20 /* SCI present */
52#define MACSEC_TCI_SCB 0x10 /* epon */
53#define MACSEC_TCI_E 0x08 /* encryption */
54#define MACSEC_TCI_C 0x04 /* changed text */
55#define MACSEC_AN_MASK 0x03 /* association number */
56#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
57
58/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
59#define MIN_NON_SHORT_LEN 48
60
61#define GCM_AES_IV_LEN 12
62#define DEFAULT_ICV_LEN 16
63
7979472b 64#define for_each_rxsc(secy, sc) \
c09440f7 65 for (sc = rcu_dereference_bh(secy->rx_sc); \
7979472b 66 sc; \
c09440f7
SD
67 sc = rcu_dereference_bh(sc->next))
68#define for_each_rxsc_rtnl(secy, sc) \
69 for (sc = rtnl_dereference(secy->rx_sc); \
70 sc; \
71 sc = rtnl_dereference(sc->next))
72
a21ecf0e
EM
73#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
74
/* GCM IV layout for XPN cipher suites: 4-byte (SSCI ^ salt) followed by
 * 8-byte (PN ^ salt) -- built by macsec_fill_iv_xpn().
 */
struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;	/* full 64-bit extended packet number */
} __packed;
82
c09440f7
SD
/* GCM IV layout for the non-XPN cipher suites: 8-byte SCI followed by the
 * 4-byte packet number -- built by macsec_fill_iv().
 */
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
90
c09440f7
SD
91#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
92
c09440f7
SD
/* Per-CPU SecY (device-level MIB) counters with their u64 sync point. */
struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};
123
124static struct macsec_dev *macsec_priv(const struct net_device *dev)
125{
126 return (struct macsec_dev *)netdev_priv(dev);
127}
128
129static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
130{
131 return rcu_dereference_bh(dev->rx_handler_data);
132}
133
134static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
135{
136 return rtnl_dereference(dev->rx_handler_data);
137}
138
/* Per-skb crypto state carried in skb->cb across asynchronous AEAD
 * completion; sized against skb->cb in macsec_skb_cb().
 */
struct macsec_cb {
	struct aead_request *req;	/* in-flight AEAD request */
	union {
		struct macsec_tx_sa *tx_sa;	/* TX path: SA reference */
		struct macsec_rx_sa *rx_sa;	/* RX path: SA reference */
	};
	u8 assoc_num;	/* AN from the SecTAG */
	bool valid;	/* ICV verified successfully */
	bool has_sci;	/* SecTAG carried an explicit SCI */
};
149
150static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
151{
152 struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
153
154 if (!sa || !sa->active)
155 return NULL;
156
e187246f 157 if (!refcount_inc_not_zero(&sa->refcnt))
c09440f7
SD
158 return NULL;
159
160 return sa;
161}
162
163static void free_rx_sc_rcu(struct rcu_head *head)
164{
165 struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
166
167 free_percpu(rx_sc->stats);
168 kfree(rx_sc);
169}
170
171static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
172{
8676d76f 173 return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
c09440f7
SD
174}
175
176static void macsec_rxsc_put(struct macsec_rx_sc *sc)
177{
8676d76f 178 if (refcount_dec_and_test(&sc->refcnt))
c09440f7
SD
179 call_rcu(&sc->rcu_head, free_rx_sc_rcu);
180}
181
182static void free_rxsa(struct rcu_head *head)
183{
184 struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
185
186 crypto_free_aead(sa->key.tfm);
187 free_percpu(sa->stats);
c09440f7
SD
188 kfree(sa);
189}
190
191static void macsec_rxsa_put(struct macsec_rx_sa *sa)
192{
e187246f 193 if (refcount_dec_and_test(&sa->refcnt))
c09440f7
SD
194 call_rcu(&sa->rcu, free_rxsa);
195}
196
197static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
198{
199 struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
200
201 if (!sa || !sa->active)
202 return NULL;
203
28206cdb 204 if (!refcount_inc_not_zero(&sa->refcnt))
c09440f7
SD
205 return NULL;
206
207 return sa;
208}
209
210static void free_txsa(struct rcu_head *head)
211{
212 struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
213
214 crypto_free_aead(sa->key.tfm);
215 free_percpu(sa->stats);
216 kfree(sa);
217}
218
219static void macsec_txsa_put(struct macsec_tx_sa *sa)
220{
28206cdb 221 if (refcount_dec_and_test(&sa->refcnt))
c09440f7
SD
222 call_rcu(&sa->rcu, free_txsa);
223}
224
225static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
226{
227 BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
228 return (struct macsec_cb *)skb->cb;
229}
230
231#define MACSEC_PORT_ES (htons(0x0001))
232#define MACSEC_PORT_SCB (0x0000)
233#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
48ef50fa 234#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
c09440f7 235
ccfdec90
FW
236#define MACSEC_GCM_AES_128_SAK_LEN 16
237#define MACSEC_GCM_AES_256_SAK_LEN 32
238
ccfdec90 239#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
48ef50fa 240#define DEFAULT_XPN false
c09440f7
SD
241#define DEFAULT_SEND_SCI true
242#define DEFAULT_ENCRYPT false
243#define DEFAULT_ENCODING_SA 0
0b52e10a 244#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
c09440f7 245
e0f841f5
TB
246static bool send_sci(const struct macsec_secy *secy)
247{
248 const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
249
250 return tx_sc->send_sci ||
251 (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
252}
253
c09440f7
SD
254static sci_t make_sci(u8 *addr, __be16 port)
255{
256 sci_t sci;
257
258 memcpy(&sci, addr, ETH_ALEN);
259 memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
260
261 return sci;
262}
263
264static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
265{
266 sci_t sci;
267
268 if (sci_present)
269 memcpy(&sci, hdr->secure_channel_id,
270 sizeof(hdr->secure_channel_id));
271 else
272 sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
273
274 return sci;
275}
276
277static unsigned int macsec_sectag_len(bool sci_present)
278{
279 return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
280}
281
282static unsigned int macsec_hdr_len(bool sci_present)
283{
284 return macsec_sectag_len(sci_present) + ETH_HLEN;
285}
286
287static unsigned int macsec_extra_len(bool sci_present)
288{
289 return macsec_sectag_len(sci_present) + sizeof(__be16);
290}
291
292/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
293static void macsec_fill_sectag(struct macsec_eth_header *h,
e0f841f5
TB
294 const struct macsec_secy *secy, u32 pn,
295 bool sci_present)
c09440f7
SD
296{
297 const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
298
e0f841f5 299 memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
c09440f7
SD
300 h->eth.h_proto = htons(ETH_P_MACSEC);
301
e0f841f5 302 if (sci_present) {
c09440f7
SD
303 h->tci_an |= MACSEC_TCI_SC;
304 memcpy(&h->secure_channel_id, &secy->sci,
305 sizeof(h->secure_channel_id));
306 } else {
307 if (tx_sc->end_station)
308 h->tci_an |= MACSEC_TCI_ES;
309 if (tx_sc->scb)
310 h->tci_an |= MACSEC_TCI_SCB;
311 }
312
313 h->packet_number = htonl(pn);
314
315 /* with GCM, C/E clear for !encrypt, both set for encrypt */
316 if (tx_sc->encrypt)
317 h->tci_an |= MACSEC_TCI_CONFID;
318 else if (secy->icv_len != DEFAULT_ICV_LEN)
319 h->tci_an |= MACSEC_TCI_C;
320
321 h->tci_an |= tx_sc->encoding_sa;
322}
323
324static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
325{
326 if (data_len < MIN_NON_SHORT_LEN)
327 h->short_length = data_len;
328}
329
3cf3227a
AT
330/* Checks if a MACsec interface is being offloaded to an hardware engine */
331static bool macsec_is_offloaded(struct macsec_dev *macsec)
332{
21114b7f
AT
333 if (macsec->offload == MACSEC_OFFLOAD_MAC ||
334 macsec->offload == MACSEC_OFFLOAD_PHY)
3cf3227a
AT
335 return true;
336
337 return false;
338}
339
340/* Checks if underlying layers implement MACsec offloading functions. */
341static bool macsec_check_offload(enum macsec_offload offload,
342 struct macsec_dev *macsec)
343{
344 if (!macsec || !macsec->real_dev)
345 return false;
346
347 if (offload == MACSEC_OFFLOAD_PHY)
348 return macsec->real_dev->phydev &&
349 macsec->real_dev->phydev->macsec_ops;
21114b7f
AT
350 else if (offload == MACSEC_OFFLOAD_MAC)
351 return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
352 macsec->real_dev->macsec_ops;
3cf3227a
AT
353
354 return false;
355}
356
357static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
358 struct macsec_dev *macsec,
359 struct macsec_context *ctx)
360{
361 if (ctx) {
362 memset(ctx, 0, sizeof(*ctx));
363 ctx->offload = offload;
364
365 if (offload == MACSEC_OFFLOAD_PHY)
366 ctx->phydev = macsec->real_dev->phydev;
21114b7f
AT
367 else if (offload == MACSEC_OFFLOAD_MAC)
368 ctx->netdev = macsec->real_dev;
3cf3227a
AT
369 }
370
21114b7f
AT
371 if (offload == MACSEC_OFFLOAD_PHY)
372 return macsec->real_dev->phydev->macsec_ops;
373 else
374 return macsec->real_dev->macsec_ops;
3cf3227a
AT
375}
376
377/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
378 * context device reference if provided.
379 */
380static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
381 struct macsec_context *ctx)
382{
383 if (!macsec_check_offload(macsec->offload, macsec))
384 return NULL;
385
386 return __macsec_get_ops(macsec->offload, macsec, ctx);
387}
388
a21ecf0e
EM
/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	/* skb->data still points at the Ethernet header here, so the
	 * secured length is everything past the two MAC addresses
	 */
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
424
425#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
2ccbe2cb 426#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
c09440f7 427
a21ecf0e
EM
428static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
429 salt_t salt)
430{
431 struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
432
433 gcm_iv->ssci = ssci ^ salt.ssci;
434 gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
435}
436
c09440f7
SD
437static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
438{
439 struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
440
441 gcm_iv->sci = sci;
442 gcm_iv->pn = htonl(pn);
443}
444
445static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
446{
447 return (struct macsec_eth_header *)skb_mac_header(skb);
448}
449
5c937de7
AT
450static void __macsec_pn_wrapped(struct macsec_secy *secy,
451 struct macsec_tx_sa *tx_sa)
452{
453 pr_debug("PN wrapped, transitioning to !oper\n");
454 tx_sa->active = false;
455 if (secy->protect_frames)
456 secy->operational = false;
457}
458
459void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
460{
461 spin_lock_bh(&tx_sa->lock);
462 __macsec_pn_wrapped(secy, tx_sa);
463 spin_unlock_bh(&tx_sa->lock);
464}
465EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
466
a21ecf0e
EM
/* Atomically consume the next TX packet number for @tx_sa.
 * Returns the PN to use for this frame (both halves); the caller
 * treats a returned full64 of 0 as PN exhaustion.  For XPN the full
 * 64-bit counter advances, otherwise only the lower half does.
 */
static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	/* counter wrapped to 0: deactivate the SA while still locked */
	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
486
487static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
488{
489 struct macsec_dev *macsec = netdev_priv(dev);
490
491 skb->dev = macsec->real_dev;
492 skb_reset_mac_header(skb);
493 skb->protocol = eth_hdr(skb)->h_proto;
494}
495
496static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
497 struct macsec_tx_sa *tx_sa)
498{
499 struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
500
501 u64_stats_update_begin(&txsc_stats->syncp);
502 if (tx_sc->encrypt) {
503 txsc_stats->stats.OutOctetsEncrypted += skb->len;
504 txsc_stats->stats.OutPktsEncrypted++;
505 this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
506 } else {
507 txsc_stats->stats.OutOctetsProtected += skb->len;
508 txsc_stats->stats.OutPktsProtected++;
509 this_cpu_inc(tx_sa->stats->OutPktsProtected);
510 }
511 u64_stats_update_end(&txsc_stats->syncp);
512}
513
514static void count_tx(struct net_device *dev, int ret, int len)
515{
516 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
517 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
518
519 u64_stats_update_begin(&stats->syncp);
520 stats->tx_packets++;
521 stats->tx_bytes += len;
522 u64_stats_update_end(&stats->syncp);
c09440f7
SD
523 }
524}
525
/* AEAD completion callback for asynchronous encryption: finish the skb,
 * bump the TX counters and transmit on the real device.  Counterpart of
 * the synchronous tail of macsec_encrypt().
 */
static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	/* drop the references taken in macsec_encrypt() */
	macsec_txsa_put(sa);
	dev_put(dev);
}
547
5d9649b3
SD
548static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
549 unsigned char **iv,
5294b830
JD
550 struct scatterlist **sg,
551 int num_frags)
5d9649b3
SD
552{
553 size_t size, iv_offset, sg_offset;
554 struct aead_request *req;
555 void *tmp;
556
557 size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
558 iv_offset = size;
559 size += GCM_AES_IV_LEN;
560
561 size = ALIGN(size, __alignof__(struct scatterlist));
562 sg_offset = size;
5294b830 563 size += sizeof(struct scatterlist) * num_frags;
5d9649b3
SD
564
565 tmp = kmalloc(size, GFP_ATOMIC);
566 if (!tmp)
567 return NULL;
568
569 *iv = (unsigned char *)(tmp + iv_offset);
570 *sg = (struct scatterlist *)(tmp + sg_offset);
571 req = tmp;
572
573 aead_request_set_tfm(req, tfm);
574
575 return req;
576}
577
c09440f7
SD
/* Protect one outgoing skb: insert the SecTAG, reserve room for the ICV
 * and run the GCM-AES AEAD transform.
 * Returns the protected skb, or ERR_PTR():
 *   -EINPROGRESS: async crypto in flight, macsec_encrypt_done() finishes
 *                 the transmit
 *   other errors: the skb has already been freed
 */
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		/* no active SA to encode with: SecY cannot transmit */
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	/* make room for the SecTAG in front and the ICV at the tail */
	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		/* PN space exhausted, SA was deactivated */
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	/* linearize enough for the sgvec; ret is the fragment count */
	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		/* encrypt the payload, authenticate eth + MACsec header */
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		/* integrity only: nothing encrypted, everything
		 * before the ICV is authenticated
		 */
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		/* references travel with the skb to macsec_encrypt_done() */
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
718
/* Post-decryption processing: replay protection, per-SC/SA receive
 * accounting, and advancement of the SA's next expected PN (10.6.5).
 * @pn is the (lower 32 bits of the) PN from the SecTAG.
 * Returns false if the frame must be dropped.
 */
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		/* ICV did not verify: drop under strict validation or
		 * confidentiality, otherwise count and deliver anyway
		 */
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		// Instead of "pn >=" - to support pn overflow in xpn
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			/* lower half wrapped: carry into the upper half */
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}
797
798static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
799{
800 skb->pkt_type = PACKET_HOST;
801 skb->protocol = eth_type_trans(skb, dev);
802
803 skb_reset_network_header(skb);
804 if (!skb_transport_header_was_set(skb))
805 skb_reset_transport_header(skb);
806 skb_reset_mac_len(skb);
807}
808
809static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
810{
7d8b16b9 811 skb->ip_summed = CHECKSUM_NONE;
c09440f7
SD
812 memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
813 skb_pull(skb, hdr_len);
814 pskb_trim_unique(skb, skb->len - icv_len);
815}
816
817static void count_rx(struct net_device *dev, int len)
818{
819 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
820
821 u64_stats_update_begin(&stats->syncp);
822 stats->rx_packets++;
823 stats->rx_bytes += len;
824 u64_stats_update_end(&stats->syncp);
825}
826
/* AEAD completion callback for asynchronous decryption: run the
 * post-decryption checks and deliver the frame via GRO.
 * @err is the AEAD verdict; non-zero means the ICV did not verify.
 */
static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	/* drop the references taken on the receive path */
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
865
/* Verify (and, when TCI.E is set, decrypt) one received frame with
 * @rx_sa.  Returns the skb on synchronous completion, or ERR_PTR():
 *   -EINPROGRESS: async crypto in flight, macsec_decrypt_done() takes
 *                 over
 *   other errors: the skb was consumed
 * A frame failing only ICV verification (-EBADMSG) is still returned
 * with cb->valid == false so the non-strict validation modes can
 * deliver it anyway (10.6).
 */
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* ret is the number of fragments to map in the sgvec */
	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		/* the header carries only the low 32 PN bits; recover the
		 * full 64-bit XPN relative to the SA's expected PN,
		 * accounting for a possible lower-half wrap
		 */
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
966
967static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
968{
969 struct macsec_rx_sc *rx_sc;
970
971 for_each_rxsc(secy, rx_sc) {
972 if (rx_sc->sci == sci)
973 return rx_sc;
974 }
975
976 return NULL;
977}
978
979static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
980{
981 struct macsec_rx_sc *rx_sc;
982
983 for_each_rxsc_rtnl(secy, rx_sc) {
984 if (rx_sc->sci == sci)
985 return rx_sc;
986 }
987
988 return NULL;
989}
990
/* Handle a frame received on the real device without a MACsec EtherType.
 * Offloaded ports may still own the frame (hardware already stripped the
 * SecTAG), so divert or clone it to them; for software SecYs this is a
 * "NoTag" packet, delivered to the controlled port only when validation
 * is not strict.
 */
static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				netif_rx(nskb);
			}
			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
1066
/* rx_handler attached to the real device: entry point for all received
 * frames.  Non-MACsec frames are diverted to handle_not_macsec(); tagged
 * frames are validated, replay-checked, decrypted and re-injected on the
 * matching macsec netdevice.  Runs in softirq context under RCU.
 *
 * Returns RX_HANDLER_CONSUMED when the skb was taken over (delivered or
 * dropped) and RX_HANDLER_PASS when the stack should keep processing it.
 */
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	/* We will modify header/metadata; make sure we own the data. */
	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	/* Try to pull enough for a SecTAG with SCI first; fall back to the
	 * short (no-SCI) SecTAG length.
	 */
	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* Find the secy/SC pair matching the frame's SCI; take an SC
	 * reference that is dropped on every exit path below.
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		/* With XPN, only frames in the same 32-bit PN half can be
		 * declared late here; the full check happens post-decrypt.
		 */
		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	/* Strip SecTAG/ICV and hand the frame to the macsec netdevice. */
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	/* skb->len may be clobbered by gro_cells_receive(); snapshot it. */
	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	/* No matching SC: offer a clone to every non-strict secy. */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
1301
1302static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1303{
1304 struct crypto_aead *tfm;
1305 int ret;
1306
ab046a5d
SD
1307 /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
1308 tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
34aedfee
DC
1309
1310 if (IS_ERR(tfm))
1311 return tfm;
c09440f7
SD
1312
1313 ret = crypto_aead_setkey(tfm, key, key_len);
34aedfee
DC
1314 if (ret < 0)
1315 goto fail;
c09440f7
SD
1316
1317 ret = crypto_aead_setauthsize(tfm, icv_len);
34aedfee
DC
1318 if (ret < 0)
1319 goto fail;
c09440f7
SD
1320
1321 return tfm;
34aedfee
DC
1322fail:
1323 crypto_free_aead(tfm);
1324 return ERR_PTR(ret);
c09440f7
SD
1325}
1326
1327static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1328 int icv_len)
1329{
1330 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1331 if (!rx_sa->stats)
34aedfee 1332 return -ENOMEM;
c09440f7
SD
1333
1334 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
34aedfee 1335 if (IS_ERR(rx_sa->key.tfm)) {
c09440f7 1336 free_percpu(rx_sa->stats);
34aedfee 1337 return PTR_ERR(rx_sa->key.tfm);
c09440f7
SD
1338 }
1339
48ef50fa 1340 rx_sa->ssci = MACSEC_UNDEF_SSCI;
c09440f7
SD
1341 rx_sa->active = false;
1342 rx_sa->next_pn = 1;
e187246f 1343 refcount_set(&rx_sa->refcnt, 1);
c09440f7
SD
1344 spin_lock_init(&rx_sa->lock);
1345
1346 return 0;
1347}
1348
/* Deactivate an RX SA and drop the caller's reference; the SA is freed
 * (via RCU) once the last reference is gone.
 */
static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}
1355
1356static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1357{
1358 int i;
1359
1360 for (i = 0; i < MACSEC_NUM_AN; i++) {
1361 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1362
1363 RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1364 if (sa)
1365 clear_rx_sa(sa);
1366 }
1367
1368 macsec_rxsc_put(rx_sc);
1369}
1370
/* Unlink the RX SC with the given SCI from @secy's singly-linked RCU list
 * and return it, or NULL if no SC matches.  Uses a pointer-to-pointer walk
 * so the head and interior links are unlinked the same way.  Caller holds
 * RTNL and owns the returned SC (typically passed to free_rx_sc()).
 */
static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			/* Only active SCs are counted in n_rx_sc. */
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
1388
/* Allocate a new RX SC for @dev with the given SCI and link it at the head
 * of the secy's SC list.  Fails with -EEXIST if any secy on the same real
 * device already uses this SCI.  Caller holds RTNL.
 *
 * Returns the new SC (refcount 1) or an ERR_PTR.
 */
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	/* SCIs must be unique across all secys sharing the real device. */
	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
1426
1427static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1428 int icv_len)
1429{
1430 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1431 if (!tx_sa->stats)
34aedfee 1432 return -ENOMEM;
c09440f7
SD
1433
1434 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
34aedfee 1435 if (IS_ERR(tx_sa->key.tfm)) {
c09440f7 1436 free_percpu(tx_sa->stats);
34aedfee 1437 return PTR_ERR(tx_sa->key.tfm);
c09440f7
SD
1438 }
1439
48ef50fa 1440 tx_sa->ssci = MACSEC_UNDEF_SSCI;
c09440f7 1441 tx_sa->active = false;
28206cdb 1442 refcount_set(&tx_sa->refcnt, 1);
c09440f7
SD
1443 spin_lock_init(&tx_sa->lock);
1444
1445 return 0;
1446}
1447
/* Deactivate a TX SA and drop the caller's reference; the SA is freed
 * (via RCU) once the last reference is gone.
 */
static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
1454
489111e5 1455static struct genl_family macsec_fam;
c09440f7
SD
1456
1457static struct net_device *get_dev_from_nl(struct net *net,
1458 struct nlattr **attrs)
1459{
1460 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1461 struct net_device *dev;
1462
1463 dev = __dev_get_by_index(net, ifindex);
1464 if (!dev)
1465 return ERR_PTR(-ENODEV);
1466
1467 if (!netif_is_macsec(dev))
1468 return ERR_PTR(-ENODEV);
1469
1470 return dev;
1471}
1472
/* Read a MACsec offload mode (u8 on the wire) as the enum type;
 * __force silences the sparse conversion warning.
 */
static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}
1477
/* Read an SCI (sci_t, 64-bit network-order id) from a u64 attribute;
 * __force silences the sparse endianness warning.
 */
static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}
1482
/* Emit an SCI as a 64-bit netlink attribute; @padattr is the pad
 * attribute type required by nla_put_u64_64bit() for alignment.
 */
static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
1488
/* Read a short SCI (ssci_t, 32-bit network-order, XPN only) from a u32
 * attribute; __force silences the sparse endianness warning.
 */
static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}
1493
1494static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
1495{
1496 return nla_put_u32(skb, attrtype, (__force u64)value);
1497}
1498
/* Look up the TX SA addressed by MACSEC_ATTR_IFINDEX + MACSEC_SA_ATTR_AN.
 * On success fills *devp/*secyp/*scp/*assoc_num and returns the SA;
 * otherwise returns ERR_PTR(-EINVAL) for a bad/missing AN or
 * ERR_PTR(-ENODEV) when the device or SA does not exist.
 * Caller holds RTNL.
 */
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}
1536
/* Look up the RX SC addressed by MACSEC_ATTR_IFINDEX + MACSEC_RXSC_ATTR_SCI.
 * On success fills *devp/*secyp and returns the SC; otherwise returns
 * ERR_PTR(-EINVAL) for a missing SCI attribute or ERR_PTR(-ENODEV) when
 * the device or SC does not exist.  Caller holds RTNL.
 */
static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
1567
/* Look up the RX SA addressed by device + SCI + AN, building on
 * get_rxsc_from_nl().  On success fills all out parameters and returns
 * the SA; otherwise returns ERR_PTR(-EINVAL) for a bad/missing AN or
 * ERR_PTR(-ENODEV) when device, SC or SA does not exist.
 * Caller holds RTNL.
 */
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}
1598
/* Top-level genetlink attribute policy: device selector plus nested
 * RXSC / SA / offload configuration blocks.
 */
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};
1605
/* Policy for the nested MACSEC_ATTR_RXSC_CONFIG block. */
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};
1610
/* Policy for the nested MACSEC_ATTR_SA_CONFIG block.  PN is 4 bytes for
 * 32-bit PNs and 8 for XPN, so only a minimum length is enforced here;
 * the exact length is checked per-command against secy->xpn.
 */
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};
1623
/* Policy for the nested MACSEC_ATTR_OFFLOAD block. */
static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};
1627
/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	/* A driver may legitimately not implement an op; treat as success. */
	if (unlikely(!func))
		return 0;

	/* PHY-based offload ops must run under the PHY device lock. */
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}
1661
c09440f7
SD
1662static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1663{
1664 if (!attrs[MACSEC_ATTR_SA_CONFIG])
1665 return -EINVAL;
1666
8cb08174 1667 if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
c09440f7
SD
1668 return -EINVAL;
1669
1670 return 0;
1671}
1672
1673static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1674{
1675 if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1676 return -EINVAL;
1677
8cb08174 1678 if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
c09440f7
SD
1679 return -EINVAL;
1680
1681 return 0;
1682}
1683
1684static bool validate_add_rxsa(struct nlattr **attrs)
1685{
1686 if (!attrs[MACSEC_SA_ATTR_AN] ||
1687 !attrs[MACSEC_SA_ATTR_KEY] ||
1688 !attrs[MACSEC_SA_ATTR_KEYID])
1689 return false;
1690
1691 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1692 return false;
1693
48ef50fa 1694 if (attrs[MACSEC_SA_ATTR_PN] &&
76208d8a 1695 nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
c09440f7
SD
1696 return false;
1697
1698 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1699 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1700 return false;
1701 }
1702
8acca6ac
SD
1703 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1704 return false;
1705
c09440f7
SD
1706 return true;
1707}
1708
/* Genetlink handler for MACSEC_CMD_ADD_RXSA: create and install a new
 * RX SA on an existing RX SC.  Validates key/PN/salt lengths against the
 * secy configuration (XPN needs 64-bit PNs, SSCI and salt), allocates and
 * initializes the SA, propagates it to offloading hardware if enabled,
 * and finally publishes it with rcu_assign_pointer().
 */
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	/* PN width depends on whether this secy uses extended PNs. */
	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		/* XPN additionally requires a short SCI and a salt. */
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		/* Wipe the key copy regardless of the outcome. */
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	/* Publish the fully-initialized SA to the datapath. */
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}
1846
1847static bool validate_add_rxsc(struct nlattr **attrs)
1848{
1849 if (!attrs[MACSEC_RXSC_ATTR_SCI])
1850 return false;
1851
1852 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1853 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1854 return false;
1855 }
1856
1857 return true;
1858}
1859
/* Genetlink handler for MACSEC_CMD_ADD_RXSC: create a new RX SC with the
 * given SCI on a macsec device, optionally marking it inactive, and
 * propagate it to offloading hardware.  On offload failure the freshly
 * linked SC is unlinked and freed again.
 */
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	/* Create and link the SC before offloading so the driver sees a
	 * fully formed SC; unwound in cleanup on failure.
	 */
	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}
1927
1928static bool validate_add_txsa(struct nlattr **attrs)
1929{
1930 if (!attrs[MACSEC_SA_ATTR_AN] ||
1931 !attrs[MACSEC_SA_ATTR_PN] ||
1932 !attrs[MACSEC_SA_ATTR_KEY] ||
1933 !attrs[MACSEC_SA_ATTR_KEYID])
1934 return false;
1935
1936 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1937 return false;
1938
76208d8a 1939 if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
c09440f7
SD
1940 return false;
1941
1942 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1943 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1944 return false;
1945 }
1946
8acca6ac
SD
1947 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1948 return false;
1949
c09440f7
SD
1950 return true;
1951}
1952
/* Genetlink handler for MACSEC_CMD_ADD_TXSA: create and install a new
 * TX SA on the device's TX SC.  Mirrors macsec_add_rxsa() but also may
 * flip secy->operational when the new SA is the active encoding SA; that
 * transition is rolled back if offloading fails.
 */
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	/* PN width depends on whether this secy uses extended PNs. */
	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		/* XPN additionally requires a short SCI and a salt. */
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* Remember the old state so offload failure can restore it. */
	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		/* Wipe the key copy regardless of the outcome. */
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	/* Publish the fully-initialized SA to the datapath. */
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}
2090
/* Genetlink handler for MACSEC_CMD_DEL_RXSA: remove an RX SA.  Refuses
 * to delete an active SA (-EBUSY); tears down the hardware state first,
 * then unpublishes the SA and drops its reference.
 */
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
2156
/* Genetlink handler for MACSEC_CMD_DEL_RXSC: unlink an RX SC by SCI,
 * tear down its hardware state, then free it (releasing all its SAs).
 * Note the SC is already unlinked when offload runs; on offload failure
 * it stays unlinked but is not freed.
 */
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
2219
/* Netlink MACSEC_CMD_DEL_TXSA handler: remove a transmit secure
 * association. The SA must already be inactive (-EBUSY otherwise);
 * the deletion is propagated to the device when offloading is in use.
 */
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	/* Refuse to delete an SA that is still active. */
	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	/* Unlink under RTNL; concurrent readers see the old SA or NULL. */
	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
2281
2282static bool validate_upd_sa(struct nlattr **attrs)
2283{
2284 if (!attrs[MACSEC_SA_ATTR_AN] ||
2285 attrs[MACSEC_SA_ATTR_KEY] ||
48ef50fa
EM
2286 attrs[MACSEC_SA_ATTR_KEYID] ||
2287 attrs[MACSEC_SA_ATTR_SSCI] ||
2288 attrs[MACSEC_SA_ATTR_SALT])
c09440f7
SD
2289 return false;
2290
2291 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2292 return false;
2293
76208d8a 2294 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
c09440f7
SD
2295 return false;
2296
2297 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2298 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2299 return false;
2300 }
2301
2302 return true;
2303}
2304
/* Netlink MACSEC_CMD_UPD_TXSA handler: update the next PN and/or the
 * active flag of a transmit SA. All software state changes are applied
 * first and rolled back if propagating them to an offloading device
 * fails, so s/w and h/w state never diverge.
 */
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		/* The PN attribute must be exactly 32 bits, or 64 bits
		 * when the cipher suite uses extended packet numbering.
		 */
		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		/* Save the old PN (both halves) for rollback on offload
		 * failure, then install the new one.
		 */
		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn_halves;
		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* Deactivating the encoding SA stops transmission entirely. */
	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	/* Undo every software change made above, in reverse. */
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}
2397
/* Netlink MACSEC_CMD_UPD_RXSA handler: update the next PN and/or the
 * active flag of a receive SA. Software changes are applied first and
 * rolled back if the offloading device rejects them.
 */
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		/* 32-bit PN attribute, or 64-bit with extended PN. */
		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		/* Save the old PN for rollback on offload failure. */
		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn_halves;
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();
	return 0;

cleanup:
	/* Undo the software changes made above. */
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}
2488
/* Netlink MACSEC_CMD_UPD_RXSC handler: update the active flag of a
 * receive secure channel, keeping secy->n_rx_sc (the count of active
 * RXSCs, used by the fast path) in sync and rolling both back if the
 * offloading device rejects the update.
 */
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	/* Same attribute constraints as when adding an RXSC. */
	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	/* Remember old state for rollback on offload failure. */
	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}
2556
dcb780fb
AT
2557static bool macsec_is_configured(struct macsec_dev *macsec)
2558{
2559 struct macsec_secy *secy = &macsec->secy;
2560 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2561 int i;
2562
5fbe0234 2563 if (secy->rx_sc)
dcb780fb
AT
2564 return true;
2565
2566 for (i = 0; i < MACSEC_NUM_AN; i++)
2567 if (tx_sc->sa[i])
2568 return true;
2569
2570 return false;
2571}
2572
2573static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2574{
2575 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2576 enum macsec_offload offload, prev_offload;
2577 int (*func)(struct macsec_context *ctx);
2578 struct nlattr **attrs = info->attrs;
a249f805 2579 struct net_device *dev;
dcb780fb
AT
2580 const struct macsec_ops *ops;
2581 struct macsec_context ctx;
2582 struct macsec_dev *macsec;
dcb780fb
AT
2583 int ret;
2584
2585 if (!attrs[MACSEC_ATTR_IFINDEX])
2586 return -EINVAL;
2587
2588 if (!attrs[MACSEC_ATTR_OFFLOAD])
2589 return -EINVAL;
2590
2591 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2592 attrs[MACSEC_ATTR_OFFLOAD],
2593 macsec_genl_offload_policy, NULL))
2594 return -EINVAL;
2595
2596 dev = get_dev_from_nl(genl_info_net(info), attrs);
2597 if (IS_ERR(dev))
2598 return PTR_ERR(dev);
2599 macsec = macsec_priv(dev);
2600
aa81700c
DC
2601 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
2602 return -EINVAL;
2603
dcb780fb
AT
2604 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2605 if (macsec->offload == offload)
2606 return 0;
2607
2608 /* Check if the offloading mode is supported by the underlying layers */
2609 if (offload != MACSEC_OFFLOAD_OFF &&
2610 !macsec_check_offload(offload, macsec))
2611 return -EOPNOTSUPP;
2612
dcb780fb
AT
2613 /* Check if the net device is busy. */
2614 if (netif_running(dev))
2615 return -EBUSY;
2616
2617 rtnl_lock();
2618
2619 prev_offload = macsec->offload;
2620 macsec->offload = offload;
2621
2622 /* Check if the device already has rules configured: we do not support
2623 * rules migration.
2624 */
2625 if (macsec_is_configured(macsec)) {
2626 ret = -EBUSY;
2627 goto rollback;
2628 }
2629
2630 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2631 macsec, &ctx);
2632 if (!ops) {
2633 ret = -EOPNOTSUPP;
2634 goto rollback;
2635 }
2636
2637 if (prev_offload == MACSEC_OFFLOAD_OFF)
2638 func = ops->mdo_add_secy;
2639 else
2640 func = ops->mdo_del_secy;
2641
2642 ctx.secy = &macsec->secy;
2643 ret = macsec_offload(func, &ctx);
2644 if (ret)
2645 goto rollback;
2646
29ca3cdf 2647 rtnl_unlock();
dcb780fb
AT
2648 return 0;
2649
2650rollback:
2651 macsec->offload = prev_offload;
2652
2653 rtnl_unlock();
2654 return ret;
2655}
2656
/* Aggregate the statistics of one transmit SA into *sum: fetched from
 * the offloading device when offload is enabled, otherwise summed from
 * the per-CPU software counters.
 */
static void get_tx_sa_stats(struct net_device *dev, int an,
			    struct macsec_tx_sa *tx_sa,
			    struct macsec_tx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.tx_sa = tx_sa;
			ctx.stats.tx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats =
			per_cpu_ptr(tx_sa->stats, cpu);

		sum->OutPktsProtected += stats->OutPktsProtected;
		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
	}
}
c09440f7 2688
b62c3624
DB
2689static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2690{
2691 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2692 sum->OutPktsProtected) ||
2693 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2694 sum->OutPktsEncrypted))
c09440f7
SD
2695 return -EMSGSIZE;
2696
2697 return 0;
2698}
2699
/* Aggregate the statistics of one receive SA into *sum: fetched from
 * the offloading device when offload is enabled, otherwise summed from
 * the per-CPU software counters.
 */
static void get_rx_sa_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc, int an,
			    struct macsec_rx_sa *rx_sa,
			    struct macsec_rx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.rx_sa = rx_sa;
			ctx.stats.rx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats =
			per_cpu_ptr(rx_sa->stats, cpu);

		sum->InPktsOK += stats->InPktsOK;
		sum->InPktsInvalid += stats->InPktsInvalid;
		sum->InPktsNotValid += stats->InPktsNotValid;
		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
	}
}
c09440f7 2736
b62c3624
DB
2737static int copy_rx_sa_stats(struct sk_buff *skb,
2738 struct macsec_rx_sa_stats *sum)
2739{
2740 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2741 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2742 sum->InPktsInvalid) ||
2743 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2744 sum->InPktsNotValid) ||
2745 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2746 sum->InPktsNotUsingSA) ||
2747 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2748 sum->InPktsUnusedSA))
c09440f7
SD
2749 return -EMSGSIZE;
2750
2751 return 0;
2752}
2753
/* Aggregate the statistics of one receive SC into *sum: fetched from
 * the offloading device when offload is enabled, otherwise summed from
 * the per-CPU software counters under u64_stats seqcount protection.
 */
static void get_rx_sc_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc,
			    struct macsec_rx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.rx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(rx_sc->stats, cpu);
		/* Take a consistent snapshot of this CPU's counters. */
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum->InOctetsValidated += tmp.InOctetsValidated;
		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum->InPktsUnchecked += tmp.InPktsUnchecked;
		sum->InPktsDelayed += tmp.InPktsDelayed;
		sum->InPktsOK += tmp.InPktsOK;
		sum->InPktsInvalid += tmp.InPktsInvalid;
		sum->InPktsLate += tmp.InPktsLate;
		sum->InPktsNotValid += tmp.InPktsNotValid;
		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
	}
}
c09440f7 2799
b62c3624
DB
2800static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2801{
f60d94c0 2802 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
b62c3624 2803 sum->InOctetsValidated,
f60d94c0
ND
2804 MACSEC_RXSC_STATS_ATTR_PAD) ||
2805 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
b62c3624 2806 sum->InOctetsDecrypted,
f60d94c0
ND
2807 MACSEC_RXSC_STATS_ATTR_PAD) ||
2808 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
b62c3624 2809 sum->InPktsUnchecked,
f60d94c0
ND
2810 MACSEC_RXSC_STATS_ATTR_PAD) ||
2811 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
b62c3624 2812 sum->InPktsDelayed,
f60d94c0
ND
2813 MACSEC_RXSC_STATS_ATTR_PAD) ||
2814 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
b62c3624 2815 sum->InPktsOK,
f60d94c0
ND
2816 MACSEC_RXSC_STATS_ATTR_PAD) ||
2817 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
b62c3624 2818 sum->InPktsInvalid,
f60d94c0
ND
2819 MACSEC_RXSC_STATS_ATTR_PAD) ||
2820 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
b62c3624 2821 sum->InPktsLate,
f60d94c0
ND
2822 MACSEC_RXSC_STATS_ATTR_PAD) ||
2823 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
b62c3624 2824 sum->InPktsNotValid,
f60d94c0
ND
2825 MACSEC_RXSC_STATS_ATTR_PAD) ||
2826 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
b62c3624 2827 sum->InPktsNotUsingSA,
f60d94c0
ND
2828 MACSEC_RXSC_STATS_ATTR_PAD) ||
2829 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
b62c3624 2830 sum->InPktsUnusedSA,
f60d94c0 2831 MACSEC_RXSC_STATS_ATTR_PAD))
c09440f7
SD
2832 return -EMSGSIZE;
2833
2834 return 0;
2835}
2836
/* Aggregate the transmit SC statistics into *sum: fetched from the
 * offloading device when offload is enabled, otherwise summed from the
 * per-CPU software counters under u64_stats seqcount protection.
 */
static void get_tx_sc_stats(struct net_device *dev,
			    struct macsec_tx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.tx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
		/* Take a consistent snapshot of this CPU's counters. */
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum->OutPktsProtected += tmp.OutPktsProtected;
		sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum->OutOctetsProtected += tmp.OutOctetsProtected;
		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}
}
c09440f7 2874
b62c3624
DB
2875static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2876{
f60d94c0 2877 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
b62c3624 2878 sum->OutPktsProtected,
f60d94c0
ND
2879 MACSEC_TXSC_STATS_ATTR_PAD) ||
2880 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
b62c3624 2881 sum->OutPktsEncrypted,
f60d94c0
ND
2882 MACSEC_TXSC_STATS_ATTR_PAD) ||
2883 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
b62c3624 2884 sum->OutOctetsProtected,
f60d94c0
ND
2885 MACSEC_TXSC_STATS_ATTR_PAD) ||
2886 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
b62c3624 2887 sum->OutOctetsEncrypted,
f60d94c0 2888 MACSEC_TXSC_STATS_ATTR_PAD))
c09440f7
SD
2889 return -EMSGSIZE;
2890
2891 return 0;
2892}
2893
/* Aggregate the SecY-level statistics into *sum: fetched from the
 * offloading device when offload is enabled, otherwise summed from the
 * per-CPU software counters under u64_stats seqcount protection.
 */
static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.dev_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_dev_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
		/* Take a consistent snapshot of this CPU's counters. */
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum->OutPktsUntagged += tmp.OutPktsUntagged;
		sum->InPktsUntagged += tmp.InPktsUntagged;
		sum->OutPktsTooLong += tmp.OutPktsTooLong;
		sum->InPktsNoTag += tmp.InPktsNoTag;
		sum->InPktsBadTag += tmp.InPktsBadTag;
		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum->InPktsNoSCI += tmp.InPktsNoSCI;
		sum->InPktsOverrun += tmp.InPktsOverrun;
	}
}
c09440f7 2934
b62c3624
DB
2935static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2936{
f60d94c0 2937 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
b62c3624 2938 sum->OutPktsUntagged,
f60d94c0
ND
2939 MACSEC_SECY_STATS_ATTR_PAD) ||
2940 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
b62c3624 2941 sum->InPktsUntagged,
f60d94c0
ND
2942 MACSEC_SECY_STATS_ATTR_PAD) ||
2943 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
b62c3624 2944 sum->OutPktsTooLong,
f60d94c0
ND
2945 MACSEC_SECY_STATS_ATTR_PAD) ||
2946 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
b62c3624 2947 sum->InPktsNoTag,
f60d94c0
ND
2948 MACSEC_SECY_STATS_ATTR_PAD) ||
2949 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
b62c3624 2950 sum->InPktsBadTag,
f60d94c0
ND
2951 MACSEC_SECY_STATS_ATTR_PAD) ||
2952 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
b62c3624 2953 sum->InPktsUnknownSCI,
f60d94c0
ND
2954 MACSEC_SECY_STATS_ATTR_PAD) ||
2955 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
b62c3624 2956 sum->InPktsNoSCI,
f60d94c0
ND
2957 MACSEC_SECY_STATS_ATTR_PAD) ||
2958 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
b62c3624 2959 sum->InPktsOverrun,
f60d94c0 2960 MACSEC_SECY_STATS_ATTR_PAD))
c09440f7
SD
2961 return -EMSGSIZE;
2962
2963 return 0;
2964}
2965
/* Fill a nested MACSEC_ATTR_SECY attribute describing the SecY and its
 * transmit SC configuration. Returns 0 on success, 1 on failure (the
 * nest is cancelled on failure).
 */
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	/* Map key length + XPN flag back to the IEEE cipher suite id. */
	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	/* The replay window is only meaningful with replay protection on. */
	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
3015
/* Build one MACSEC_CMD_GET_TXSC dump message for a single MACsec
 * device: SecY configuration, offload mode, SecY/TXSC statistics, the
 * list of TX SAs, and for each RXSC its statistics and RX SAs.
 *
 * Runs under RTNL (taken by macsec_dump_txsc). Every failure path
 * cancels the nests opened so far before cancelling the whole message,
 * so a partially-built message is never emitted.
 */
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_tx_sc_stats tx_sc_stats = {0, };
	struct macsec_tx_sa_stats tx_sa_stats = {0, };
	struct macsec_rx_sc_stats rx_sc_stats = {0, };
	struct macsec_rx_sa_stats rx_sa_stats = {0, };
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_dev_stats dev_stats = {0, };
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	/* Lets userspace detect config changes across dump chunks. */
	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;

	get_tx_sc_stats(dev, &tx_sc_stats);
	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	get_secy_stats(dev, &dev_stats);
	if (copy_secy_stats(skb, &dev_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	/* Transmit SAs: one nest per configured AN, numbered from 1. */
	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;
		u64 pn;
		int pn_len;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		/* tx_sa_stats is reused per SA — clear before summing. */
		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		/* PN is reported as 64 bits with XPN, 32 bits otherwise. */
		if (secy->xpn) {
			pn = tx_sa->next_pn;
			pn_len = MACSEC_XPN_PN_LEN;
		} else {
			pn = tx_sa->next_pn_halves.lower;
			pn_len = MACSEC_DEFAULT_PN_LEN;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	/* Receive SCs, each with its statistics and its SA list. */
	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;
			u64 pn;
			int pn_len;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (secy->xpn) {
				pn = rx_sa->next_pn;
				pn_len = MACSEC_XPN_PN_LEN;
			} else {
				pn = rx_sa->next_pn_halves.lower;
				pn_len = MACSEC_DEFAULT_PN_LEN;
			}

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
3248
/* Bumped on every add/remove of a macsec device; used as the netlink
 * dump sequence number so userspace can detect mid-dump changes.
 * Protected by RTNL.
 */
static int macsec_generation = 1;
3250
c09440f7
SD
3251static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3252{
3253 struct net *net = sock_net(skb->sk);
3254 struct net_device *dev;
3255 int dev_idx, d;
3256
3257 dev_idx = cb->args[0];
3258
3259 d = 0;
c10c63ea 3260 rtnl_lock();
96cfc505
SD
3261
3262 cb->seq = macsec_generation;
3263
c09440f7
SD
3264 for_each_netdev(net, dev) {
3265 struct macsec_secy *secy;
3266
3267 if (d < dev_idx)
3268 goto next;
3269
3270 if (!netif_is_macsec(dev))
3271 goto next;
3272
3273 secy = &macsec_priv(dev)->secy;
3274 if (dump_secy(secy, dev, skb, cb) < 0)
3275 goto done;
3276next:
3277 d++;
3278 }
3279
3280done:
c10c63ea 3281 rtnl_unlock();
c09440f7
SD
3282 cb->args[0] = d;
3283 return skb->len;
3284}
3285
66a9b928 3286static const struct genl_small_ops macsec_genl_ops[] = {
c09440f7
SD
3287 {
3288 .cmd = MACSEC_CMD_GET_TXSC,
ef6243ac 3289 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3290 .dumpit = macsec_dump_txsc,
c09440f7
SD
3291 },
3292 {
3293 .cmd = MACSEC_CMD_ADD_RXSC,
ef6243ac 3294 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3295 .doit = macsec_add_rxsc,
c09440f7
SD
3296 .flags = GENL_ADMIN_PERM,
3297 },
3298 {
3299 .cmd = MACSEC_CMD_DEL_RXSC,
ef6243ac 3300 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3301 .doit = macsec_del_rxsc,
c09440f7
SD
3302 .flags = GENL_ADMIN_PERM,
3303 },
3304 {
3305 .cmd = MACSEC_CMD_UPD_RXSC,
ef6243ac 3306 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3307 .doit = macsec_upd_rxsc,
c09440f7
SD
3308 .flags = GENL_ADMIN_PERM,
3309 },
3310 {
3311 .cmd = MACSEC_CMD_ADD_TXSA,
ef6243ac 3312 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3313 .doit = macsec_add_txsa,
c09440f7
SD
3314 .flags = GENL_ADMIN_PERM,
3315 },
3316 {
3317 .cmd = MACSEC_CMD_DEL_TXSA,
ef6243ac 3318 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3319 .doit = macsec_del_txsa,
c09440f7
SD
3320 .flags = GENL_ADMIN_PERM,
3321 },
3322 {
3323 .cmd = MACSEC_CMD_UPD_TXSA,
ef6243ac 3324 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3325 .doit = macsec_upd_txsa,
c09440f7
SD
3326 .flags = GENL_ADMIN_PERM,
3327 },
3328 {
3329 .cmd = MACSEC_CMD_ADD_RXSA,
ef6243ac 3330 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3331 .doit = macsec_add_rxsa,
c09440f7
SD
3332 .flags = GENL_ADMIN_PERM,
3333 },
3334 {
3335 .cmd = MACSEC_CMD_DEL_RXSA,
ef6243ac 3336 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3337 .doit = macsec_del_rxsa,
c09440f7
SD
3338 .flags = GENL_ADMIN_PERM,
3339 },
3340 {
3341 .cmd = MACSEC_CMD_UPD_RXSA,
ef6243ac 3342 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3343 .doit = macsec_upd_rxsa,
c09440f7
SD
3344 .flags = GENL_ADMIN_PERM,
3345 },
dcb780fb
AT
3346 {
3347 .cmd = MACSEC_CMD_UPD_OFFLOAD,
3348 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3349 .doit = macsec_upd_offload,
3350 .flags = GENL_ADMIN_PERM,
3351 },
c09440f7
SD
3352};
3353
56989f6d 3354static struct genl_family macsec_fam __ro_after_init = {
489111e5
JB
3355 .name = MACSEC_GENL_NAME,
3356 .hdrsize = 0,
3357 .version = MACSEC_GENL_VERSION,
3358 .maxattr = MACSEC_ATTR_MAX,
3b0f31f2 3359 .policy = macsec_genl_policy,
489111e5
JB
3360 .netnsok = true,
3361 .module = THIS_MODULE,
66a9b928
JK
3362 .small_ops = macsec_genl_ops,
3363 .n_small_ops = ARRAY_SIZE(macsec_genl_ops),
489111e5
JB
3364};
3365
c09440f7
SD
3366static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3367 struct net_device *dev)
3368{
3369 struct macsec_dev *macsec = netdev_priv(dev);
3370 struct macsec_secy *secy = &macsec->secy;
3371 struct pcpu_secy_stats *secy_stats;
3372 int ret, len;
3373
3cf3227a
AT
3374 if (macsec_is_offloaded(netdev_priv(dev))) {
3375 skb->dev = macsec->real_dev;
3376 return dev_queue_xmit(skb);
3377 }
3378
c09440f7
SD
3379 /* 10.5 */
3380 if (!secy->protect_frames) {
3381 secy_stats = this_cpu_ptr(macsec->stats);
3382 u64_stats_update_begin(&secy_stats->syncp);
3383 secy_stats->stats.OutPktsUntagged++;
3384 u64_stats_update_end(&secy_stats->syncp);
79c62220 3385 skb->dev = macsec->real_dev;
c09440f7
SD
3386 len = skb->len;
3387 ret = dev_queue_xmit(skb);
3388 count_tx(dev, ret, len);
3389 return ret;
3390 }
3391
3392 if (!secy->operational) {
3393 kfree_skb(skb);
3394 dev->stats.tx_dropped++;
3395 return NETDEV_TX_OK;
3396 }
3397
3398 skb = macsec_encrypt(skb, dev);
3399 if (IS_ERR(skb)) {
3400 if (PTR_ERR(skb) != -EINPROGRESS)
3401 dev->stats.tx_dropped++;
3402 return NETDEV_TX_OK;
3403 }
3404
3405 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3406
3407 macsec_encrypt_finish(skb, dev);
3408 len = skb->len;
3409 ret = dev_queue_xmit(skb);
3410 count_tx(dev, ret, len);
3411 return ret;
3412}
3413
/* Lower-device features that a macsec device may inherit. */
#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
e2003872 3416
c09440f7
SD
3417static int macsec_dev_init(struct net_device *dev)
3418{
3419 struct macsec_dev *macsec = macsec_priv(dev);
3420 struct net_device *real_dev = macsec->real_dev;
5491e7c6 3421 int err;
c09440f7
SD
3422
3423 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3424 if (!dev->tstats)
3425 return -ENOMEM;
3426
5491e7c6
PA
3427 err = gro_cells_init(&macsec->gro_cells, dev);
3428 if (err) {
3429 free_percpu(dev->tstats);
3430 return err;
3431 }
3432
c9532cc3
SD
3433 dev->features = real_dev->features & MACSEC_FEATURES;
3434 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
c09440f7
SD
3435
3436 dev->needed_headroom = real_dev->needed_headroom +
3437 MACSEC_NEEDED_HEADROOM;
3438 dev->needed_tailroom = real_dev->needed_tailroom +
3439 MACSEC_NEEDED_TAILROOM;
3440
3441 if (is_zero_ether_addr(dev->dev_addr))
3442 eth_hw_addr_inherit(dev, real_dev);
3443 if (is_zero_ether_addr(dev->broadcast))
3444 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3445
3446 return 0;
3447}
3448
3449static void macsec_dev_uninit(struct net_device *dev)
3450{
5491e7c6
PA
3451 struct macsec_dev *macsec = macsec_priv(dev);
3452
3453 gro_cells_destroy(&macsec->gro_cells);
c09440f7
SD
3454 free_percpu(dev->tstats);
3455}
3456
3457static netdev_features_t macsec_fix_features(struct net_device *dev,
3458 netdev_features_t features)
3459{
3460 struct macsec_dev *macsec = macsec_priv(dev);
3461 struct net_device *real_dev = macsec->real_dev;
3462
c9532cc3 3463 features &= (real_dev->features & MACSEC_FEATURES) |
5491e7c6
PA
3464 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3465 features |= NETIF_F_LLTX;
c09440f7
SD
3466
3467 return features;
3468}
3469
3470static int macsec_dev_open(struct net_device *dev)
3471{
3472 struct macsec_dev *macsec = macsec_priv(dev);
3473 struct net_device *real_dev = macsec->real_dev;
3474 int err;
3475
c09440f7
SD
3476 err = dev_uc_add(real_dev, dev->dev_addr);
3477 if (err < 0)
3478 return err;
3479
3480 if (dev->flags & IFF_ALLMULTI) {
3481 err = dev_set_allmulti(real_dev, 1);
3482 if (err < 0)
3483 goto del_unicast;
3484 }
3485
3486 if (dev->flags & IFF_PROMISC) {
3487 err = dev_set_promiscuity(real_dev, 1);
3488 if (err < 0)
3489 goto clear_allmulti;
3490 }
3491
3cf3227a
AT
3492 /* If h/w offloading is available, propagate to the device */
3493 if (macsec_is_offloaded(macsec)) {
3494 const struct macsec_ops *ops;
3495 struct macsec_context ctx;
3496
3497 ops = macsec_get_ops(netdev_priv(dev), &ctx);
3498 if (!ops) {
3499 err = -EOPNOTSUPP;
3500 goto clear_allmulti;
3501 }
3502
182879f8 3503 ctx.secy = &macsec->secy;
3cf3227a
AT
3504 err = macsec_offload(ops->mdo_dev_open, &ctx);
3505 if (err)
3506 goto clear_allmulti;
3507 }
3508
c09440f7
SD
3509 if (netif_carrier_ok(real_dev))
3510 netif_carrier_on(dev);
3511
3512 return 0;
3513clear_allmulti:
3514 if (dev->flags & IFF_ALLMULTI)
3515 dev_set_allmulti(real_dev, -1);
3516del_unicast:
3517 dev_uc_del(real_dev, dev->dev_addr);
3518 netif_carrier_off(dev);
3519 return err;
3520}
3521
3522static int macsec_dev_stop(struct net_device *dev)
3523{
3524 struct macsec_dev *macsec = macsec_priv(dev);
3525 struct net_device *real_dev = macsec->real_dev;
3526
3527 netif_carrier_off(dev);
3528
3cf3227a
AT
3529 /* If h/w offloading is available, propagate to the device */
3530 if (macsec_is_offloaded(macsec)) {
3531 const struct macsec_ops *ops;
3532 struct macsec_context ctx;
3533
3534 ops = macsec_get_ops(macsec, &ctx);
182879f8
DB
3535 if (ops) {
3536 ctx.secy = &macsec->secy;
3cf3227a 3537 macsec_offload(ops->mdo_dev_stop, &ctx);
182879f8 3538 }
3cf3227a
AT
3539 }
3540
c09440f7
SD
3541 dev_mc_unsync(real_dev, dev);
3542 dev_uc_unsync(real_dev, dev);
3543
3544 if (dev->flags & IFF_ALLMULTI)
3545 dev_set_allmulti(real_dev, -1);
3546
3547 if (dev->flags & IFF_PROMISC)
3548 dev_set_promiscuity(real_dev, -1);
3549
3550 dev_uc_del(real_dev, dev->dev_addr);
3551
3552 return 0;
3553}
3554
3555static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3556{
3557 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3558
3559 if (!(dev->flags & IFF_UP))
3560 return;
3561
3562 if (change & IFF_ALLMULTI)
3563 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3564
3565 if (change & IFF_PROMISC)
3566 dev_set_promiscuity(real_dev,
3567 dev->flags & IFF_PROMISC ? 1 : -1);
3568}
3569
3570static void macsec_dev_set_rx_mode(struct net_device *dev)
3571{
3572 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3573
3574 dev_mc_sync(real_dev, dev);
3575 dev_uc_sync(real_dev, dev);
3576}
3577
3578static int macsec_set_mac_address(struct net_device *dev, void *p)
3579{
3580 struct macsec_dev *macsec = macsec_priv(dev);
3581 struct net_device *real_dev = macsec->real_dev;
3582 struct sockaddr *addr = p;
3583 int err;
3584
3585 if (!is_valid_ether_addr(addr->sa_data))
3586 return -EADDRNOTAVAIL;
3587
3588 if (!(dev->flags & IFF_UP))
3589 goto out;
3590
3591 err = dev_uc_add(real_dev, addr->sa_data);
3592 if (err < 0)
3593 return err;
3594
3595 dev_uc_del(real_dev, dev->dev_addr);
3596
3597out:
c49555ee 3598 eth_hw_addr_set(dev, addr->sa_data);
09f4136c
DB
3599
3600 /* If h/w offloading is available, propagate to the device */
3601 if (macsec_is_offloaded(macsec)) {
3602 const struct macsec_ops *ops;
3603 struct macsec_context ctx;
3604
3605 ops = macsec_get_ops(macsec, &ctx);
3606 if (ops) {
3607 ctx.secy = &macsec->secy;
3608 macsec_offload(ops->mdo_upd_secy, &ctx);
3609 }
3610 }
3611
c09440f7
SD
3612 return 0;
3613}
3614
3615static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3616{
3617 struct macsec_dev *macsec = macsec_priv(dev);
3618 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3619
3620 if (macsec->real_dev->mtu - extra < new_mtu)
3621 return -ERANGE;
3622
3623 dev->mtu = new_mtu;
3624
3625 return 0;
3626}
3627
bc1f4470 3628static void macsec_get_stats64(struct net_device *dev,
3629 struct rtnl_link_stats64 *s)
c09440f7 3630{
c09440f7 3631 if (!dev->tstats)
bc1f4470 3632 return;
c09440f7 3633
9d015167 3634 dev_fetch_sw_netstats(s, dev->tstats);
c09440f7
SD
3635
3636 s->rx_dropped = dev->stats.rx_dropped;
3637 s->tx_dropped = dev->stats.tx_dropped;
c09440f7
SD
3638}
3639
3640static int macsec_get_iflink(const struct net_device *dev)
3641{
3642 return macsec_priv(dev)->real_dev->ifindex;
3643}
3644
3645static const struct net_device_ops macsec_netdev_ops = {
3646 .ndo_init = macsec_dev_init,
3647 .ndo_uninit = macsec_dev_uninit,
3648 .ndo_open = macsec_dev_open,
3649 .ndo_stop = macsec_dev_stop,
3650 .ndo_fix_features = macsec_fix_features,
3651 .ndo_change_mtu = macsec_change_mtu,
3652 .ndo_set_rx_mode = macsec_dev_set_rx_mode,
3653 .ndo_change_rx_flags = macsec_dev_change_rx_flags,
3654 .ndo_set_mac_address = macsec_set_mac_address,
3655 .ndo_start_xmit = macsec_start_xmit,
3656 .ndo_get_stats64 = macsec_get_stats64,
3657 .ndo_get_iflink = macsec_get_iflink,
3658};
3659
3660static const struct device_type macsec_type = {
3661 .name = "macsec",
3662};
3663
3664static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3665 [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
31d9a1c5 3666 [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
c09440f7
SD
3667 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3668 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3669 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3670 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3671 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3672 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3673 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3674 [IFLA_MACSEC_ES] = { .type = NLA_U8 },
3675 [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3676 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3677 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3678};
3679
3680static void macsec_free_netdev(struct net_device *dev)
3681{
3682 struct macsec_dev *macsec = macsec_priv(dev);
c09440f7
SD
3683
3684 free_percpu(macsec->stats);
3685 free_percpu(macsec->secy.tx_sc.stats);
3686
c09440f7
SD
3687}
3688
3689static void macsec_setup(struct net_device *dev)
3690{
3691 ether_setup(dev);
91572088
JW
3692 dev->min_mtu = 0;
3693 dev->max_mtu = ETH_MAX_MTU;
e425974f 3694 dev->priv_flags |= IFF_NO_QUEUE;
c09440f7 3695 dev->netdev_ops = &macsec_netdev_ops;
cf124db5
DM
3696 dev->needs_free_netdev = true;
3697 dev->priv_destructor = macsec_free_netdev;
c24acf03 3698 SET_NETDEV_DEVTYPE(dev, &macsec_type);
c09440f7
SD
3699
3700 eth_zero_addr(dev->broadcast);
3701}
3702
ccfdec90
FW
3703static int macsec_changelink_common(struct net_device *dev,
3704 struct nlattr *data[])
c09440f7
SD
3705{
3706 struct macsec_secy *secy;
3707 struct macsec_tx_sc *tx_sc;
3708
3709 secy = &macsec_priv(dev)->secy;
3710 tx_sc = &secy->tx_sc;
3711
3712 if (data[IFLA_MACSEC_ENCODING_SA]) {
3713 struct macsec_tx_sa *tx_sa;
3714
3715 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3716 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3717
3718 secy->operational = tx_sa && tx_sa->active;
3719 }
3720
c09440f7
SD
3721 if (data[IFLA_MACSEC_ENCRYPT])
3722 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3723
3724 if (data[IFLA_MACSEC_PROTECT])
3725 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3726
3727 if (data[IFLA_MACSEC_INC_SCI])
3728 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3729
3730 if (data[IFLA_MACSEC_ES])
3731 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3732
3733 if (data[IFLA_MACSEC_SCB])
3734 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3735
3736 if (data[IFLA_MACSEC_REPLAY_PROTECT])
3737 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3738
3739 if (data[IFLA_MACSEC_VALIDATION])
3740 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
ccfdec90
FW
3741
3742 if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3743 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3744 case MACSEC_CIPHER_ID_GCM_AES_128:
e8660ded 3745 case MACSEC_DEFAULT_CIPHER_ID:
ccfdec90 3746 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
48ef50fa 3747 secy->xpn = false;
ccfdec90
FW
3748 break;
3749 case MACSEC_CIPHER_ID_GCM_AES_256:
3750 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
48ef50fa
EM
3751 secy->xpn = false;
3752 break;
3753 case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3754 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3755 secy->xpn = true;
3756 break;
3757 case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3758 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3759 secy->xpn = true;
ccfdec90
FW
3760 break;
3761 default:
3762 return -EINVAL;
3763 }
3764 }
3765
0b52e10a
SD
3766 if (data[IFLA_MACSEC_WINDOW]) {
3767 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3768
3769 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3770 * for XPN cipher suites */
3771 if (secy->xpn &&
3772 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3773 return -EINVAL;
3774 }
3775
ccfdec90 3776 return 0;
c09440f7
SD
3777}
3778
3779static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
ad744b22
MS
3780 struct nlattr *data[],
3781 struct netlink_ext_ack *extack)
c09440f7 3782{
3cf3227a 3783 struct macsec_dev *macsec = macsec_priv(dev);
022e9d60 3784 struct macsec_tx_sc tx_sc;
3cf3227a
AT
3785 struct macsec_secy secy;
3786 int ret;
3787
c09440f7
SD
3788 if (!data)
3789 return 0;
3790
3791 if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3792 data[IFLA_MACSEC_ICV_LEN] ||
3793 data[IFLA_MACSEC_SCI] ||
3794 data[IFLA_MACSEC_PORT])
3795 return -EINVAL;
3796
3cf3227a
AT
3797 /* Keep a copy of unmodified secy and tx_sc, in case the offload
3798 * propagation fails, to revert macsec_changelink_common.
3799 */
3800 memcpy(&secy, &macsec->secy, sizeof(secy));
3801 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3802
3803 ret = macsec_changelink_common(dev, data);
3804 if (ret)
0b52e10a 3805 goto cleanup;
3cf3227a
AT
3806
3807 /* If h/w offloading is available, propagate to the device */
3808 if (macsec_is_offloaded(macsec)) {
3809 const struct macsec_ops *ops;
3810 struct macsec_context ctx;
3cf3227a
AT
3811
3812 ops = macsec_get_ops(netdev_priv(dev), &ctx);
3813 if (!ops) {
3814 ret = -EOPNOTSUPP;
3815 goto cleanup;
3816 }
3817
3818 ctx.secy = &macsec->secy;
3819 ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3820 if (ret)
3821 goto cleanup;
3822 }
3823
3824 return 0;
3825
3826cleanup:
3827 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3828 memcpy(&macsec->secy, &secy, sizeof(secy));
3829
3830 return ret;
c09440f7
SD
3831}
3832
3833static void macsec_del_dev(struct macsec_dev *macsec)
3834{
3835 int i;
3836
3837 while (macsec->secy.rx_sc) {
3838 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3839
3840 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3841 free_rx_sc(rx_sc);
3842 }
3843
3844 for (i = 0; i < MACSEC_NUM_AN; i++) {
3845 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3846
3847 if (sa) {
3848 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3849 clear_tx_sa(sa);
3850 }
3851 }
3852}
3853
bbe11fab
SD
3854static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3855{
3856 struct macsec_dev *macsec = macsec_priv(dev);
e2003872 3857 struct net_device *real_dev = macsec->real_dev;
bbe11fab 3858
19ee9054
LN
3859 /* If h/w offloading is available, propagate to the device */
3860 if (macsec_is_offloaded(macsec)) {
3861 const struct macsec_ops *ops;
3862 struct macsec_context ctx;
3863
3864 ops = macsec_get_ops(netdev_priv(dev), &ctx);
3865 if (ops) {
3866 ctx.secy = &macsec->secy;
3867 macsec_offload(ops->mdo_del_secy, &ctx);
3868 }
3869 }
3870
bbe11fab
SD
3871 unregister_netdevice_queue(dev, head);
3872 list_del_rcu(&macsec->secys);
3873 macsec_del_dev(macsec);
e2003872 3874 netdev_upper_dev_unlink(real_dev, dev);
bbe11fab
SD
3875
3876 macsec_generation++;
3877}
3878
c09440f7
SD
3879static void macsec_dellink(struct net_device *dev, struct list_head *head)
3880{
3881 struct macsec_dev *macsec = macsec_priv(dev);
3882 struct net_device *real_dev = macsec->real_dev;
3883 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3884
bbe11fab 3885 macsec_common_dellink(dev, head);
96cfc505 3886
960d5848 3887 if (list_empty(&rxd->secys)) {
c09440f7 3888 netdev_rx_handler_unregister(real_dev);
960d5848
SD
3889 kfree(rxd);
3890 }
c09440f7
SD
3891}
3892
3893static int register_macsec_dev(struct net_device *real_dev,
3894 struct net_device *dev)
3895{
3896 struct macsec_dev *macsec = macsec_priv(dev);
3897 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3898
3899 if (!rxd) {
3900 int err;
3901
3902 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3903 if (!rxd)
3904 return -ENOMEM;
3905
3906 INIT_LIST_HEAD(&rxd->secys);
3907
3908 err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3909 rxd);
960d5848
SD
3910 if (err < 0) {
3911 kfree(rxd);
c09440f7 3912 return err;
960d5848 3913 }
c09440f7
SD
3914 }
3915
3916 list_add_tail_rcu(&macsec->secys, &rxd->secys);
3917 return 0;
3918}
3919
3920static bool sci_exists(struct net_device *dev, sci_t sci)
3921{
3922 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3923 struct macsec_dev *macsec;
3924
3925 list_for_each_entry(macsec, &rxd->secys, secys) {
3926 if (macsec->secy.sci == sci)
3927 return true;
3928 }
3929
3930 return false;
3931}
3932
034cc03b
SD
3933static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3934{
3935 return make_sci(dev->dev_addr, port);
3936}
3937
c09440f7
SD
3938static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
3939{
3940 struct macsec_dev *macsec = macsec_priv(dev);
3941 struct macsec_secy *secy = &macsec->secy;
3942
3943 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
3944 if (!macsec->stats)
3945 return -ENOMEM;
3946
3947 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
3948 if (!secy->tx_sc.stats) {
3949 free_percpu(macsec->stats);
3950 return -ENOMEM;
3951 }
3952
3953 if (sci == MACSEC_UNDEF_SCI)
3954 sci = dev_to_sci(dev, MACSEC_PORT_ES);
3955
3956 secy->netdev = dev;
3957 secy->operational = true;
3958 secy->key_len = DEFAULT_SAK_LEN;
3959 secy->icv_len = icv_len;
3960 secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
3961 secy->protect_frames = true;
3962 secy->replay_protect = false;
48ef50fa 3963 secy->xpn = DEFAULT_XPN;
c09440f7
SD
3964
3965 secy->sci = sci;
3966 secy->tx_sc.active = true;
3967 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
3968 secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
3969 secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
3970 secy->tx_sc.end_station = false;
3971 secy->tx_sc.scb = false;
3972
3973 return 0;
3974}
3975
845e0ebb
CW
3976static struct lock_class_key macsec_netdev_addr_lock_key;
3977
c09440f7 3978static int macsec_newlink(struct net *net, struct net_device *dev,
7a3f4a18
MS
3979 struct nlattr *tb[], struct nlattr *data[],
3980 struct netlink_ext_ack *extack)
c09440f7
SD
3981{
3982 struct macsec_dev *macsec = macsec_priv(dev);
7f327080
TY
3983 rx_handler_func_t *rx_handler;
3984 u8 icv_len = DEFAULT_ICV_LEN;
c09440f7 3985 struct net_device *real_dev;
7f327080 3986 int err, mtu;
c09440f7 3987 sci_t sci;
c09440f7
SD
3988
3989 if (!tb[IFLA_LINK])
3990 return -EINVAL;
3991 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
3992 if (!real_dev)
3993 return -ENODEV;
b06d072c
WB
3994 if (real_dev->type != ARPHRD_ETHER)
3995 return -EINVAL;
c09440f7
SD
3996
3997 dev->priv_flags |= IFF_MACSEC;
3998
3999 macsec->real_dev = real_dev;
4000
791bb3fc
MS
4001 if (data && data[IFLA_MACSEC_OFFLOAD])
4002 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
4003 else
4004 /* MACsec offloading is off by default */
4005 macsec->offload = MACSEC_OFFLOAD_OFF;
4006
4007 /* Check if the offloading mode is supported by the underlying layers */
4008 if (macsec->offload != MACSEC_OFFLOAD_OFF &&
4009 !macsec_check_offload(macsec->offload, macsec))
4010 return -EOPNOTSUPP;
3cf3227a 4011
e4f40642
LN
4012 /* send_sci must be set to true when transmit sci explicitly is set */
4013 if ((data && data[IFLA_MACSEC_SCI]) &&
4014 (data && data[IFLA_MACSEC_INC_SCI])) {
4015 u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
4016
4017 if (!send_sci)
4018 return -EINVAL;
4019 }
4020
c09440f7
SD
4021 if (data && data[IFLA_MACSEC_ICV_LEN])
4022 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
7f327080
TY
4023 mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
4024 if (mtu < 0)
4025 dev->mtu = 0;
4026 else
4027 dev->mtu = mtu;
c09440f7
SD
4028
4029 rx_handler = rtnl_dereference(real_dev->rx_handler);
4030 if (rx_handler && rx_handler != macsec_handle_frame)
4031 return -EBUSY;
4032
4033 err = register_netdevice(dev);
4034 if (err < 0)
4035 return err;
4036
1a33e10e 4037 netdev_lockdep_set_classes(dev);
be74294f
CW
4038 lockdep_set_class(&dev->addr_list_lock,
4039 &macsec_netdev_addr_lock_key);
1a33e10e 4040
42ab19ee 4041 err = netdev_upper_dev_link(real_dev, dev, extack);
e2003872 4042 if (err < 0)
bd28899d 4043 goto unregister;
e2003872 4044
c09440f7
SD
4045 /* need to be already registered so that ->init has run and
4046 * the MAC addr is set
4047 */
4048 if (data && data[IFLA_MACSEC_SCI])
4049 sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
4050 else if (data && data[IFLA_MACSEC_PORT])
4051 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
4052 else
4053 sci = dev_to_sci(dev, MACSEC_PORT_ES);
4054
4055 if (rx_handler && sci_exists(real_dev, sci)) {
4056 err = -EBUSY;
e2003872 4057 goto unlink;
c09440f7
SD
4058 }
4059
4060 err = macsec_add_dev(dev, sci, icv_len);
4061 if (err)
e2003872 4062 goto unlink;
c09440f7 4063
ccfdec90
FW
4064 if (data) {
4065 err = macsec_changelink_common(dev, data);
4066 if (err)
4067 goto del_dev;
4068 }
c09440f7 4069
791bb3fc
MS
4070 /* If h/w offloading is available, propagate to the device */
4071 if (macsec_is_offloaded(macsec)) {
4072 const struct macsec_ops *ops;
4073 struct macsec_context ctx;
4074
4075 ops = macsec_get_ops(macsec, &ctx);
4076 if (ops) {
4077 ctx.secy = &macsec->secy;
4078 err = macsec_offload(ops->mdo_add_secy, &ctx);
4079 if (err)
4080 goto del_dev;
4081 }
4082 }
4083
c09440f7
SD
4084 err = register_macsec_dev(real_dev, dev);
4085 if (err < 0)
4086 goto del_dev;
4087
e6ac0758
SD
4088 netif_stacked_transfer_operstate(real_dev, dev);
4089 linkwatch_fire_event(dev);
4090
96cfc505
SD
4091 macsec_generation++;
4092
c09440f7
SD
4093 return 0;
4094
4095del_dev:
4096 macsec_del_dev(macsec);
e2003872
SD
4097unlink:
4098 netdev_upper_dev_unlink(real_dev, dev);
bd28899d 4099unregister:
c09440f7
SD
4100 unregister_netdevice(dev);
4101 return err;
4102}
4103
a8b8a889
MS
4104static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
4105 struct netlink_ext_ack *extack)
c09440f7 4106{
74816480 4107 u64 csid = MACSEC_DEFAULT_CIPHER_ID;
c09440f7
SD
4108 u8 icv_len = DEFAULT_ICV_LEN;
4109 int flag;
4110 bool es, scb, sci;
4111
4112 if (!data)
4113 return 0;
4114
4115 if (data[IFLA_MACSEC_CIPHER_SUITE])
4116 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
4117
f04c392d 4118 if (data[IFLA_MACSEC_ICV_LEN]) {
c09440f7 4119 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
f04c392d
DC
4120 if (icv_len != DEFAULT_ICV_LEN) {
4121 char dummy_key[DEFAULT_SAK_LEN] = { 0 };
4122 struct crypto_aead *dummy_tfm;
4123
4124 dummy_tfm = macsec_alloc_tfm(dummy_key,
4125 DEFAULT_SAK_LEN,
4126 icv_len);
4127 if (IS_ERR(dummy_tfm))
4128 return PTR_ERR(dummy_tfm);
4129 crypto_free_aead(dummy_tfm);
4130 }
4131 }
c09440f7
SD
4132
4133 switch (csid) {
ccfdec90
FW
4134 case MACSEC_CIPHER_ID_GCM_AES_128:
4135 case MACSEC_CIPHER_ID_GCM_AES_256:
48ef50fa
EM
4136 case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
4137 case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
e8660ded 4138 case MACSEC_DEFAULT_CIPHER_ID:
c09440f7 4139 if (icv_len < MACSEC_MIN_ICV_LEN ||
2ccbe2cb 4140 icv_len > MACSEC_STD_ICV_LEN)
c09440f7
SD
4141 return -EINVAL;
4142 break;
4143 default:
4144 return -EINVAL;
4145 }
4146
4147 if (data[IFLA_MACSEC_ENCODING_SA]) {
4148 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
4149 return -EINVAL;
4150 }
4151
4152 for (flag = IFLA_MACSEC_ENCODING_SA + 1;
4153 flag < IFLA_MACSEC_VALIDATION;
4154 flag++) {
4155 if (data[flag]) {
4156 if (nla_get_u8(data[flag]) > 1)
4157 return -EINVAL;
4158 }
4159 }
4160
4161 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
4162 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
4163 scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
4164
4165 if ((sci && (scb || es)) || (scb && es))
4166 return -EINVAL;
4167
4168 if (data[IFLA_MACSEC_VALIDATION] &&
4169 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
4170 return -EINVAL;
4171
4b1fb935
SD
4172 if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
4173 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
c09440f7
SD
4174 !data[IFLA_MACSEC_WINDOW])
4175 return -EINVAL;
4176
4177 return 0;
4178}
4179
4180static struct net *macsec_get_link_net(const struct net_device *dev)
4181{
4182 return dev_net(macsec_priv(dev)->real_dev);
4183}
4184
4185static size_t macsec_get_size(const struct net_device *dev)
4186{
c9fba3ed
ZS
4187 return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
4188 nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
4189 nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
4190 nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
4191 nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
4192 nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
4193 nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
4194 nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
4195 nla_total_size(1) + /* IFLA_MACSEC_ES */
4196 nla_total_size(1) + /* IFLA_MACSEC_SCB */
4197 nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
4198 nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
c09440f7
SD
4199 0;
4200}
4201
4202static int macsec_fill_info(struct sk_buff *skb,
4203 const struct net_device *dev)
4204{
4205 struct macsec_secy *secy = &macsec_priv(dev)->secy;
4206 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
ccfdec90
FW
4207 u64 csid;
4208
4209 switch (secy->key_len) {
4210 case MACSEC_GCM_AES_128_SAK_LEN:
48ef50fa 4211 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
ccfdec90
FW
4212 break;
4213 case MACSEC_GCM_AES_256_SAK_LEN:
48ef50fa 4214 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
ccfdec90
FW
4215 break;
4216 default:
4217 goto nla_put_failure;
4218 }
c09440f7 4219
f60d94c0
ND
4220 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
4221 IFLA_MACSEC_PAD) ||
c09440f7 4222 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
f60d94c0 4223 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
ccfdec90 4224 csid, IFLA_MACSEC_PAD) ||
c09440f7
SD
4225 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
4226 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
4227 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
4228 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
4229 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
4230 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
4231 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
4232 nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
4233 0)
4234 goto nla_put_failure;
4235
4236 if (secy->replay_protect) {
4237 if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
4238 goto nla_put_failure;
4239 }
4240
4241 return 0;
4242
4243nla_put_failure:
4244 return -EMSGSIZE;
4245}
4246
/* rtnetlink link operations: lets userspace create/configure/delete macsec
 * devices via "ip link ... type macsec" and dumps their state through
 * macsec_get_size()/macsec_fill_info().
 */
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
4261
/* Return true if @dev is the lower (real) device of at least one macsec
 * interface, i.e. our rx handler is installed on it.
 */
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
4266
4267static int macsec_notify(struct notifier_block *this, unsigned long event,
4268 void *ptr)
4269{
4270 struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
4271 LIST_HEAD(head);
4272
4273 if (!is_macsec_master(real_dev))
4274 return NOTIFY_DONE;
4275
4276 switch (event) {
e6ac0758
SD
4277 case NETDEV_DOWN:
4278 case NETDEV_UP:
4279 case NETDEV_CHANGE: {
4280 struct macsec_dev *m, *n;
4281 struct macsec_rxh_data *rxd;
4282
4283 rxd = macsec_data_rtnl(real_dev);
4284 list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4285 struct net_device *dev = m->secy.netdev;
4286
4287 netif_stacked_transfer_operstate(real_dev, dev);
4288 }
4289 break;
4290 }
c09440f7
SD
4291 case NETDEV_UNREGISTER: {
4292 struct macsec_dev *m, *n;
4293 struct macsec_rxh_data *rxd;
4294
4295 rxd = macsec_data_rtnl(real_dev);
4296 list_for_each_entry_safe(m, n, &rxd->secys, secys) {
bbe11fab 4297 macsec_common_dellink(m->secy.netdev, &head);
c09440f7 4298 }
bbe11fab
SD
4299
4300 netdev_rx_handler_unregister(real_dev);
4301 kfree(rxd);
4302
c09440f7
SD
4303 unregister_netdevice_many(&head);
4304 break;
4305 }
4306 case NETDEV_CHANGEMTU: {
4307 struct macsec_dev *m;
4308 struct macsec_rxh_data *rxd;
4309
4310 rxd = macsec_data_rtnl(real_dev);
4311 list_for_each_entry(m, &rxd->secys, secys) {
4312 struct net_device *dev = m->secy.netdev;
4313 unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
4314 macsec_extra_len(true));
4315
4316 if (dev->mtu > mtu)
4317 dev_set_mtu(dev, mtu);
4318 }
4319 }
4320 }
4321
4322 return NOTIFY_OK;
4323}
4324
/* Hooks macsec_notify() into the netdevice notifier chain. */
static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
4328
4329static int __init macsec_init(void)
4330{
4331 int err;
4332
4333 pr_info("MACsec IEEE 802.1AE\n");
4334 err = register_netdevice_notifier(&macsec_notifier);
4335 if (err)
4336 return err;
4337
4338 err = rtnl_link_register(&macsec_link_ops);
4339 if (err)
4340 goto notifier;
4341
489111e5 4342 err = genl_register_family(&macsec_fam);
c09440f7
SD
4343 if (err)
4344 goto rtnl;
4345
4346 return 0;
4347
4348rtnl:
4349 rtnl_link_unregister(&macsec_link_ops);
4350notifier:
4351 unregister_netdevice_notifier(&macsec_notifier);
4352 return err;
4353}
4354
/* Module exit: tear down registrations in reverse order of macsec_init(),
 * then wait for outstanding RCU callbacks before the module text goes away.
 */
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	/* Ensure all pending call_rcu() callbacks have completed so none
	 * run after the module is unloaded.
	 */
	rcu_barrier();
}
4362
module_init(macsec_init);
module_exit(macsec_exit);

/* Allow module autoloading for "ip link add ... type macsec" and for
 * generic netlink requests to the "macsec" family.
 */
MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");