// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

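/* true when two 32-bit packet numbers lie in the same half of the
 * sequence space (same bit 31); the XPN code below uses this to detect
 * rollover of the lower 32 bits of the 64-bit PN.
 */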
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

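/* The SCI is encoded in the SecTAG when explicitly configured, or when
 * more than one receive SC exists and neither the ES nor the SCB bit
 * can identify the transmitter implicitly.
 */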
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

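/* Build the 64-bit SCI from a MAC address followed by a 16-bit port
 * identifier, both kept in network byte order.
 */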
static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

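/* The SL field is only set when the secure data is shorter than 48
 * octets; a value of 0 means "not short" (IEEE 802.1AE-2006 9.7).
 */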
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

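/* For XPN cipher suites the 96-bit GCM IV is the 32-bit SSCI followed
 * by the 64-bit PN, each XORed with the matching part of the per-SA
 * salt (IEEE 802.1AEbw-2013).
 */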
static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

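/* Hand out the PN to use for the next frame and advance the counter; a
 * returned PN of 0 means the SA's PN space was exhausted (the SA was
 * already marked inactive when the wrap happened).
 */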
static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

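/* Allocate the aead_request together with room for the IV and the
 * scatterlist in a single GFP_ATOMIC allocation; *iv and *sg point
 * into that same buffer.
 */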
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

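/* Protect one frame: push the SecTAG, reserve room for the ICV and run
 * AES-GCM over the frame. Returns the protected skb, an ERR_PTR on
 * failure, or ERR_PTR(-EINPROGRESS) when the crypto operation will
 * complete asynchronously via macsec_encrypt_done().
 */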
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

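/* Post-decryption processing of IEEE 802.1AE-2006 figure 10-5: replay
 * protection against the receive window, the validation counters, and
 * advancing the SA's next expected PN.
 */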
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* Instead of "pn >=" - to support pn overflow in xpn */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

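/* Strip the SecTAG and ICV so the frame looks like plain Ethernet again
 * before it is delivered to the controlled port.
 */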
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

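/* Authenticate (and decrypt, for confidentiality-protected frames) one
 * frame. Like macsec_encrypt(), this returns ERR_PTR(-EINPROGRESS) when
 * the operation completes asynchronously via macsec_decrypt_done().
 */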
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

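		/* Recover the full 64-bit PN from the 32 bits carried in
		 * the SecTAG: if the received lower half is behind the
		 * expected one and in the other half of the sequence
		 * space, the upper half must have rolled over.
		 */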
		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				netif_rx(nskb);
			}
			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}

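/* rx_handler attached to the real device: every frame received on the
 * underlying netdevice passes through here, MACsec-tagged or not.
 */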
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

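/* Create a receive SC for @sci and link it into the SecY's RCU list;
 * fails with -EEXIST if any SecY on the same real device already uses
 * this SCI.
 */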
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u64)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

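/* Add a receive SA to an existing receive SC: validate the netlink
 * attributes under rtnl, initialize the SA, propagate it to an
 * offloading device if any, then publish it via RCU.
 */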
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
c09440f7
SD
2075 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2076
2077 rtnl_unlock();
2078
2079 return 0;
3cf3227a
AT
2080
2081cleanup:
2082 secy->operational = was_operational;
ee90aab2 2083 macsec_txsa_put(tx_sa);
3cf3227a
AT
2084 rtnl_unlock();
2085 return err;
c09440f7
SD
2086}
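
Two details above are easy to miss: with an XPN cipher suite the PN attribute doubles in width, and SSCI plus a salt become mandatory. A small sketch of those sizing rules, assuming the usual uapi values (MACSEC_DEFAULT_PN_LEN == 4, MACSEC_XPN_PN_LEN == 8, MACSEC_SALT_LEN == 12):

	#include <stdbool.h>
	#include <stddef.h>

	static size_t expected_pn_len(bool xpn)
	{
		return xpn ? 8 : 4;	/* 64-bit PN only for XPN cipher suites */
	}

	static bool xpn_attrs_ok(bool xpn, bool has_ssci,
				 bool has_salt, size_t salt_len)
	{
		if (!xpn)
			return true;	/* SSCI/SALT are XPN-only attributes */
		return has_ssci && has_salt && salt_len == 12;	/* MACSEC_SALT_LEN */
	}
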
2087
2088static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2089{
2090 struct nlattr **attrs = info->attrs;
2091 struct net_device *dev;
2092 struct macsec_secy *secy;
2093 struct macsec_rx_sc *rx_sc;
2094 struct macsec_rx_sa *rx_sa;
2095 u8 assoc_num;
2096 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2097 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
3cf3227a 2098 int ret;
c09440f7
SD
2099
2100 if (!attrs[MACSEC_ATTR_IFINDEX])
2101 return -EINVAL;
2102
2103 if (parse_sa_config(attrs, tb_sa))
2104 return -EINVAL;
2105
2106 if (parse_rxsc_config(attrs, tb_rxsc))
2107 return -EINVAL;
2108
2109 rtnl_lock();
2110 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2111 &dev, &secy, &rx_sc, &assoc_num);
2112 if (IS_ERR(rx_sa)) {
2113 rtnl_unlock();
2114 return PTR_ERR(rx_sa);
2115 }
2116
2117 if (rx_sa->active) {
2118 rtnl_unlock();
2119 return -EBUSY;
2120 }
2121
3cf3227a
AT
2122 /* If h/w offloading is available, propagate to the device */
2123 if (macsec_is_offloaded(netdev_priv(dev))) {
2124 const struct macsec_ops *ops;
2125 struct macsec_context ctx;
2126
2127 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2128 if (!ops) {
2129 ret = -EOPNOTSUPP;
2130 goto cleanup;
2131 }
2132
2133 ctx.sa.assoc_num = assoc_num;
2134 ctx.sa.rx_sa = rx_sa;
182879f8 2135 ctx.secy = secy;
3cf3227a
AT
2136
2137 ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2138 if (ret)
2139 goto cleanup;
2140 }
2141
c09440f7
SD
2142 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2143 clear_rx_sa(rx_sa);
2144
2145 rtnl_unlock();
2146
2147 return 0;
3cf3227a
AT
2148
2149cleanup:
2150 rtnl_unlock();
2151 return ret;
c09440f7
SD
2152}
2153
2154static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2155{
2156 struct nlattr **attrs = info->attrs;
2157 struct net_device *dev;
2158 struct macsec_secy *secy;
2159 struct macsec_rx_sc *rx_sc;
2160 sci_t sci;
2161 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
3cf3227a 2162 int ret;
c09440f7
SD
2163
2164 if (!attrs[MACSEC_ATTR_IFINDEX])
2165 return -EINVAL;
2166
2167 if (parse_rxsc_config(attrs, tb_rxsc))
2168 return -EINVAL;
2169
2170 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2171 return -EINVAL;
2172
2173 rtnl_lock();
2174 dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2175 if (IS_ERR(dev)) {
2176 rtnl_unlock();
2177 return PTR_ERR(dev);
2178 }
2179
2180 secy = &macsec_priv(dev)->secy;
2181 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2182
2183 rx_sc = del_rx_sc(secy, sci);
2184 if (!rx_sc) {
2185 rtnl_unlock();
2186 return -ENODEV;
2187 }
2188
3cf3227a
AT
2189 /* If h/w offloading is available, propagate to the device */
2190 if (macsec_is_offloaded(netdev_priv(dev))) {
2191 const struct macsec_ops *ops;
2192 struct macsec_context ctx;
2193
2194 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2195 if (!ops) {
2196 ret = -EOPNOTSUPP;
2197 goto cleanup;
2198 }
2199
2200 ctx.rx_sc = rx_sc;
182879f8 2201 ctx.secy = secy;
3cf3227a
AT
2202 ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2203 if (ret)
2204 goto cleanup;
2205 }
2206
c09440f7
SD
2207 free_rx_sc(rx_sc);
2208 rtnl_unlock();
2209
2210 return 0;
3cf3227a
AT
2211
2212cleanup:
2213 rtnl_unlock();
2214 return ret;
c09440f7
SD
2215}
2216
2217static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2218{
2219 struct nlattr **attrs = info->attrs;
2220 struct net_device *dev;
2221 struct macsec_secy *secy;
2222 struct macsec_tx_sc *tx_sc;
2223 struct macsec_tx_sa *tx_sa;
2224 u8 assoc_num;
2225 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
3cf3227a 2226 int ret;
c09440f7
SD
2227
2228 if (!attrs[MACSEC_ATTR_IFINDEX])
2229 return -EINVAL;
2230
2231 if (parse_sa_config(attrs, tb_sa))
2232 return -EINVAL;
2233
2234 rtnl_lock();
2235 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2236 &dev, &secy, &tx_sc, &assoc_num);
2237 if (IS_ERR(tx_sa)) {
2238 rtnl_unlock();
2239 return PTR_ERR(tx_sa);
2240 }
2241
2242 if (tx_sa->active) {
2243 rtnl_unlock();
2244 return -EBUSY;
2245 }
2246
3cf3227a
AT
2247 /* If h/w offloading is available, propagate to the device */
2248 if (macsec_is_offloaded(netdev_priv(dev))) {
2249 const struct macsec_ops *ops;
2250 struct macsec_context ctx;
2251
2252 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2253 if (!ops) {
2254 ret = -EOPNOTSUPP;
2255 goto cleanup;
2256 }
2257
2258 ctx.sa.assoc_num = assoc_num;
2259 ctx.sa.tx_sa = tx_sa;
182879f8 2260 ctx.secy = secy;
3cf3227a
AT
2261
2262 ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2263 if (ret)
2264 goto cleanup;
2265 }
2266
c09440f7
SD
2267 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2268 clear_tx_sa(tx_sa);
2269
2270 rtnl_unlock();
2271
2272 return 0;
3cf3227a
AT
2273
2274cleanup:
2275 rtnl_unlock();
2276 return ret;
c09440f7
SD
2277}
2278
2279static bool validate_upd_sa(struct nlattr **attrs)
2280{
2281 if (!attrs[MACSEC_SA_ATTR_AN] ||
2282 attrs[MACSEC_SA_ATTR_KEY] ||
48ef50fa
EM
2283 attrs[MACSEC_SA_ATTR_KEYID] ||
2284 attrs[MACSEC_SA_ATTR_SSCI] ||
2285 attrs[MACSEC_SA_ATTR_SALT])
c09440f7
SD
2286 return false;
2287
2288 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2289 return false;
2290
76208d8a 2291 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
c09440f7
SD
2292 return false;
2293
2294 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2295 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2296 return false;
2297 }
2298
2299 return true;
2300}
2301
2302static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2303{
2304 struct nlattr **attrs = info->attrs;
2305 struct net_device *dev;
2306 struct macsec_secy *secy;
2307 struct macsec_tx_sc *tx_sc;
2308 struct macsec_tx_sa *tx_sa;
2309 u8 assoc_num;
2310 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
3cf3227a 2311 bool was_operational, was_active;
a21ecf0e 2312 pn_t prev_pn;
3cf3227a 2313 int ret = 0;
c09440f7 2314
a21ecf0e
EM
2315 prev_pn.full64 = 0;
2316
c09440f7
SD
2317 if (!attrs[MACSEC_ATTR_IFINDEX])
2318 return -EINVAL;
2319
2320 if (parse_sa_config(attrs, tb_sa))
2321 return -EINVAL;
2322
2323 if (!validate_upd_sa(tb_sa))
2324 return -EINVAL;
2325
2326 rtnl_lock();
2327 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2328 &dev, &secy, &tx_sc, &assoc_num);
2329 if (IS_ERR(tx_sa)) {
2330 rtnl_unlock();
2331 return PTR_ERR(tx_sa);
2332 }
2333
2334 if (tb_sa[MACSEC_SA_ATTR_PN]) {
48ef50fa
EM
2335 int pn_len;
2336
2337 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2338 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2339 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2340 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2341 rtnl_unlock();
2342 return -EINVAL;
2343 }
2344
c09440f7 2345 spin_lock_bh(&tx_sa->lock);
a21ecf0e 2346 prev_pn = tx_sa->next_pn_halves;
48ef50fa 2347 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
c09440f7
SD
2348 spin_unlock_bh(&tx_sa->lock);
2349 }
2350
3cf3227a 2351 was_active = tx_sa->active;
c09440f7
SD
2352 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2353 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2354
3cf3227a 2355 was_operational = secy->operational;
c09440f7
SD
2356 if (assoc_num == tx_sc->encoding_sa)
2357 secy->operational = tx_sa->active;
2358
3cf3227a
AT
2359 /* If h/w offloading is available, propagate to the device */
2360 if (macsec_is_offloaded(netdev_priv(dev))) {
2361 const struct macsec_ops *ops;
2362 struct macsec_context ctx;
2363
2364 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2365 if (!ops) {
2366 ret = -EOPNOTSUPP;
2367 goto cleanup;
2368 }
2369
2370 ctx.sa.assoc_num = assoc_num;
2371 ctx.sa.tx_sa = tx_sa;
182879f8 2372 ctx.secy = secy;
3cf3227a
AT
2373
2374 ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2375 if (ret)
2376 goto cleanup;
2377 }
2378
c09440f7
SD
2379 rtnl_unlock();
2380
2381 return 0;
3cf3227a
AT
2382
2383cleanup:
2384 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2385 spin_lock_bh(&tx_sa->lock);
a21ecf0e 2386 tx_sa->next_pn_halves = prev_pn;
3cf3227a
AT
2387 spin_unlock_bh(&tx_sa->lock);
2388 }
2389 tx_sa->active = was_active;
2390 secy->operational = was_operational;
2391 rtnl_unlock();
2392 return ret;
c09440f7
SD
2393}
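
The update path follows a snapshot-and-rollback shape: capture prev_pn, was_active and was_operational before mutating, then restore all of them if the offload hook rejects the change. The same idiom in miniature (a generic sketch, not kernel code; commit() stands in for macsec_offload()):

	#include <stdbool.h>
	#include <stdint.h>

	struct sa_state {
		uint64_t pn;
		bool active;
	};

	static int update_with_rollback(struct sa_state *sa,
					const struct sa_state *req,
					int (*commit)(const struct sa_state *))
	{
		struct sa_state prev = *sa;	/* snapshot before mutating */
		int err;

		*sa = *req;			/* apply the requested change */
		err = commit(sa);		/* e.g. push to offloading hardware */
		if (err)
			*sa = prev;		/* full rollback on failure */
		return err;
	}
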
2394
2395static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2396{
2397 struct nlattr **attrs = info->attrs;
2398 struct net_device *dev;
2399 struct macsec_secy *secy;
2400 struct macsec_rx_sc *rx_sc;
2401 struct macsec_rx_sa *rx_sa;
2402 u8 assoc_num;
2403 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2404 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
3cf3227a 2405 bool was_active;
a21ecf0e 2406 pn_t prev_pn;
3cf3227a 2407 int ret = 0;
c09440f7 2408
a21ecf0e
EM
2409 prev_pn.full64 = 0;
2410
c09440f7
SD
2411 if (!attrs[MACSEC_ATTR_IFINDEX])
2412 return -EINVAL;
2413
2414 if (parse_rxsc_config(attrs, tb_rxsc))
2415 return -EINVAL;
2416
2417 if (parse_sa_config(attrs, tb_sa))
2418 return -EINVAL;
2419
2420 if (!validate_upd_sa(tb_sa))
2421 return -EINVAL;
2422
2423 rtnl_lock();
2424 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2425 &dev, &secy, &rx_sc, &assoc_num);
2426 if (IS_ERR(rx_sa)) {
2427 rtnl_unlock();
2428 return PTR_ERR(rx_sa);
2429 }
2430
2431 if (tb_sa[MACSEC_SA_ATTR_PN]) {
48ef50fa
EM
2432 int pn_len;
2433
2434 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2435 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2436 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2437 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2438 rtnl_unlock();
2439 return -EINVAL;
2440 }
2441
c09440f7 2442 spin_lock_bh(&rx_sa->lock);
a21ecf0e 2443 prev_pn = rx_sa->next_pn_halves;
48ef50fa 2444 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
c09440f7
SD
2445 spin_unlock_bh(&rx_sa->lock);
2446 }
2447
3cf3227a 2448 was_active = rx_sa->active;
c09440f7
SD
2449 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2450 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2451
3cf3227a
AT
2452 /* If h/w offloading is available, propagate to the device */
2453 if (macsec_is_offloaded(netdev_priv(dev))) {
2454 const struct macsec_ops *ops;
2455 struct macsec_context ctx;
2456
2457 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2458 if (!ops) {
2459 ret = -EOPNOTSUPP;
2460 goto cleanup;
2461 }
2462
2463 ctx.sa.assoc_num = assoc_num;
2464 ctx.sa.rx_sa = rx_sa;
182879f8 2465 ctx.secy = secy;
3cf3227a
AT
2466
2467 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2468 if (ret)
2469 goto cleanup;
2470 }
2471
c09440f7
SD
2472 rtnl_unlock();
2473 return 0;
3cf3227a
AT
2474
2475cleanup:
2476 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2477 spin_lock_bh(&rx_sa->lock);
a21ecf0e 2478 rx_sa->next_pn_halves = prev_pn;
3cf3227a
AT
2479 spin_unlock_bh(&rx_sa->lock);
2480 }
2481 rx_sa->active = was_active;
2482 rtnl_unlock();
2483 return ret;
c09440f7
SD
2484}
2485
2486static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2487{
2488 struct nlattr **attrs = info->attrs;
2489 struct net_device *dev;
2490 struct macsec_secy *secy;
2491 struct macsec_rx_sc *rx_sc;
2492 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
3cf3227a
AT
2493 unsigned int prev_n_rx_sc;
2494 bool was_active;
2495 int ret;
c09440f7
SD
2496
2497 if (!attrs[MACSEC_ATTR_IFINDEX])
2498 return -EINVAL;
2499
2500 if (parse_rxsc_config(attrs, tb_rxsc))
2501 return -EINVAL;
2502
2503 if (!validate_add_rxsc(tb_rxsc))
2504 return -EINVAL;
2505
2506 rtnl_lock();
2507 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2508 if (IS_ERR(rx_sc)) {
2509 rtnl_unlock();
2510 return PTR_ERR(rx_sc);
2511 }
2512
3cf3227a
AT
2513 was_active = rx_sc->active;
2514 prev_n_rx_sc = secy->n_rx_sc;
c09440f7
SD
2515 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2516 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2517
2518 if (rx_sc->active != new)
2519 secy->n_rx_sc += new ? 1 : -1;
2520
2521 rx_sc->active = new;
2522 }
2523
3cf3227a
AT
2524 /* If h/w offloading is available, propagate to the device */
2525 if (macsec_is_offloaded(netdev_priv(dev))) {
2526 const struct macsec_ops *ops;
2527 struct macsec_context ctx;
2528
2529 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2530 if (!ops) {
2531 ret = -EOPNOTSUPP;
2532 goto cleanup;
2533 }
2534
2535 ctx.rx_sc = rx_sc;
182879f8 2536 ctx.secy = secy;
3cf3227a
AT
2537
2538 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2539 if (ret)
2540 goto cleanup;
2541 }
2542
c09440f7
SD
2543 rtnl_unlock();
2544
2545 return 0;
3cf3227a
AT
2546
2547cleanup:
2548 secy->n_rx_sc = prev_n_rx_sc;
2549 rx_sc->active = was_active;
2550 rtnl_unlock();
2551 return ret;
c09440f7
SD
2552}
2553
dcb780fb
AT
2554static bool macsec_is_configured(struct macsec_dev *macsec)
2555{
2556 struct macsec_secy *secy = &macsec->secy;
2557 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2558 int i;
2559
2560 if (secy->n_rx_sc > 0)
2561 return true;
2562
2563 for (i = 0; i < MACSEC_NUM_AN; i++)
2564 if (tx_sc->sa[i])
2565 return true;
2566
2567 return false;
2568}
2569
2570static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2571{
2572 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2573 enum macsec_offload offload, prev_offload;
2574 int (*func)(struct macsec_context *ctx);
2575 struct nlattr **attrs = info->attrs;
a249f805 2576 struct net_device *dev;
dcb780fb
AT
2577 const struct macsec_ops *ops;
2578 struct macsec_context ctx;
2579 struct macsec_dev *macsec;
dcb780fb
AT
2580 int ret;
2581
2582 if (!attrs[MACSEC_ATTR_IFINDEX])
2583 return -EINVAL;
2584
2585 if (!attrs[MACSEC_ATTR_OFFLOAD])
2586 return -EINVAL;
2587
2588 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2589 attrs[MACSEC_ATTR_OFFLOAD],
2590 macsec_genl_offload_policy, NULL))
2591 return -EINVAL;
2592
2593 dev = get_dev_from_nl(genl_info_net(info), attrs);
2594 if (IS_ERR(dev))
2595 return PTR_ERR(dev);
2596 macsec = macsec_priv(dev);
2597
aa81700c
DC
2598 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
2599 return -EINVAL;
2600
dcb780fb
AT
2601 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2602 if (macsec->offload == offload)
2603 return 0;
2604
2605 /* Check if the offloading mode is supported by the underlying layers */
2606 if (offload != MACSEC_OFFLOAD_OFF &&
2607 !macsec_check_offload(offload, macsec))
2608 return -EOPNOTSUPP;
2609
dcb780fb
AT
2610 /* The offload mode can only be changed while the device is down. */
2611 if (netif_running(dev))
2612 return -EBUSY;
2613
2614 rtnl_lock();
2615
2616 prev_offload = macsec->offload;
2617 macsec->offload = offload;
2618
2619 /* Check if the device already has rules configured: we do not support
2620 * rules migration.
2621 */
2622 if (macsec_is_configured(macsec)) {
2623 ret = -EBUSY;
2624 goto rollback;
2625 }
2626
2627 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2628 macsec, &ctx);
2629 if (!ops) {
2630 ret = -EOPNOTSUPP;
2631 goto rollback;
2632 }
2633
2634 if (prev_offload == MACSEC_OFFLOAD_OFF)
2635 func = ops->mdo_add_secy;
2636 else
2637 func = ops->mdo_del_secy;
2638
2639 ctx.secy = &macsec->secy;
2640 ret = macsec_offload(func, &ctx);
2641 if (ret)
2642 goto rollback;
2643
c850240b
MS
2644 /* Force a features update, since they differ between the software
2645 * MACsec and h/w offloading cases.
2646 */
2647 netdev_update_features(dev);
29ca3cdf
AT
2648
2649 rtnl_unlock();
dcb780fb
AT
2650 return 0;
2651
2652rollback:
2653 macsec->offload = prev_offload;
2654
2655 rtnl_unlock();
2656 return ret;
2657}
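
From userspace, the command above is a genetlink message with the offload type carried in a nested MACSEC_ATTR_OFFLOAD attribute. A sketch assuming libnl-3 (libnl-genl-3) and <linux/if_macsec.h>; error handling is mostly elided:

	#include <stdint.h>
	#include <net/if.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/if_macsec.h>

	static int set_offload(const char *ifname, uint8_t type /* MACSEC_OFFLOAD_* */)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg = nlmsg_alloc();
		struct nlattr *nest;
		int fam, err;

		genl_connect(sk);
		fam = genl_ctrl_resolve(sk, MACSEC_GENL_NAME);

		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
			    MACSEC_CMD_UPD_OFFLOAD, MACSEC_GENL_VERSION);
		nla_put_u32(msg, MACSEC_ATTR_IFINDEX, if_nametoindex(ifname));
		nest = nla_nest_start(msg, MACSEC_ATTR_OFFLOAD);
		nla_put_u8(msg, MACSEC_OFFLOAD_ATTR_TYPE, type);
		nla_nest_end(msg, nest);

		nl_send_auto(sk, msg);
		err = nl_wait_for_ack(sk);	/* expect -EBUSY while the device is up */

		nlmsg_free(msg);
		nl_socket_free(sk);
		return err;
	}
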
2658
b62c3624
DB
2659static void get_tx_sa_stats(struct net_device *dev, int an,
2660 struct macsec_tx_sa *tx_sa,
2661 struct macsec_tx_sa_stats *sum)
c09440f7 2662{
b62c3624 2663 struct macsec_dev *macsec = macsec_priv(dev);
c09440f7
SD
2664 int cpu;
2665
b62c3624
DB
2666 /* If h/w offloading is available, propagate to the device */
2667 if (macsec_is_offloaded(macsec)) {
2668 const struct macsec_ops *ops;
2669 struct macsec_context ctx;
2670
2671 ops = macsec_get_ops(macsec, &ctx);
2672 if (ops) {
2673 ctx.sa.assoc_num = an;
2674 ctx.sa.tx_sa = tx_sa;
2675 ctx.stats.tx_sa_stats = sum;
2676 ctx.secy = &macsec_priv(dev)->secy;
2677 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2678 }
2679 return;
2680 }
2681
c09440f7 2682 for_each_possible_cpu(cpu) {
b62c3624
DB
2683 const struct macsec_tx_sa_stats *stats =
2684 per_cpu_ptr(tx_sa->stats, cpu);
c09440f7 2685
b62c3624
DB
2686 sum->OutPktsProtected += stats->OutPktsProtected;
2687 sum->OutPktsEncrypted += stats->OutPktsEncrypted;
c09440f7 2688 }
b62c3624 2689}
c09440f7 2690
b62c3624
DB
2691static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2692{
2693 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2694 sum->OutPktsProtected) ||
2695 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2696 sum->OutPktsEncrypted))
c09440f7
SD
2697 return -EMSGSIZE;
2698
2699 return 0;
2700}
2701
b62c3624
DB
2702static void get_rx_sa_stats(struct net_device *dev,
2703 struct macsec_rx_sc *rx_sc, int an,
2704 struct macsec_rx_sa *rx_sa,
2705 struct macsec_rx_sa_stats *sum)
c09440f7 2706{
b62c3624 2707 struct macsec_dev *macsec = macsec_priv(dev);
c09440f7
SD
2708 int cpu;
2709
b62c3624
DB
2710 /* If h/w offloading is available, propagate to the device */
2711 if (macsec_is_offloaded(macsec)) {
2712 const struct macsec_ops *ops;
2713 struct macsec_context ctx;
2714
2715 ops = macsec_get_ops(macsec, &ctx);
2716 if (ops) {
2717 ctx.sa.assoc_num = an;
2718 ctx.sa.rx_sa = rx_sa;
2719 ctx.stats.rx_sa_stats = sum;
2720 ctx.secy = &macsec_priv(dev)->secy;
2721 ctx.rx_sc = rx_sc;
2722 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2723 }
2724 return;
2725 }
2726
c09440f7 2727 for_each_possible_cpu(cpu) {
b62c3624
DB
2728 const struct macsec_rx_sa_stats *stats =
2729 per_cpu_ptr(rx_sa->stats, cpu);
c09440f7 2730
b62c3624
DB
2731 sum->InPktsOK += stats->InPktsOK;
2732 sum->InPktsInvalid += stats->InPktsInvalid;
2733 sum->InPktsNotValid += stats->InPktsNotValid;
2734 sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2735 sum->InPktsUnusedSA += stats->InPktsUnusedSA;
c09440f7 2736 }
b62c3624 2737}
c09440f7 2738
b62c3624
DB
2739static int copy_rx_sa_stats(struct sk_buff *skb,
2740 struct macsec_rx_sa_stats *sum)
2741{
2742 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2743 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2744 sum->InPktsInvalid) ||
2745 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2746 sum->InPktsNotValid) ||
2747 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2748 sum->InPktsNotUsingSA) ||
2749 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2750 sum->InPktsUnusedSA))
c09440f7
SD
2751 return -EMSGSIZE;
2752
2753 return 0;
2754}
2755
b62c3624
DB
2756static void get_rx_sc_stats(struct net_device *dev,
2757 struct macsec_rx_sc *rx_sc,
2758 struct macsec_rx_sc_stats *sum)
c09440f7 2759{
b62c3624 2760 struct macsec_dev *macsec = macsec_priv(dev);
c09440f7
SD
2761 int cpu;
2762
b62c3624
DB
2763 /* If h/w offloading is available, propagate to the device */
2764 if (macsec_is_offloaded(macsec)) {
2765 const struct macsec_ops *ops;
2766 struct macsec_context ctx;
2767
2768 ops = macsec_get_ops(macsec, &ctx);
2769 if (ops) {
2770 ctx.stats.rx_sc_stats = sum;
2771 ctx.secy = &macsec_priv(dev)->secy;
2772 ctx.rx_sc = rx_sc;
2773 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2774 }
2775 return;
2776 }
2777
c09440f7
SD
2778 for_each_possible_cpu(cpu) {
2779 const struct pcpu_rx_sc_stats *stats;
2780 struct macsec_rx_sc_stats tmp;
2781 unsigned int start;
2782
b62c3624 2783 stats = per_cpu_ptr(rx_sc->stats, cpu);
c09440f7
SD
2784 do {
2785 start = u64_stats_fetch_begin_irq(&stats->syncp);
2786 memcpy(&tmp, &stats->stats, sizeof(tmp));
2787 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2788
b62c3624
DB
2789 sum->InOctetsValidated += tmp.InOctetsValidated;
2790 sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2791 sum->InPktsUnchecked += tmp.InPktsUnchecked;
2792 sum->InPktsDelayed += tmp.InPktsDelayed;
2793 sum->InPktsOK += tmp.InPktsOK;
2794 sum->InPktsInvalid += tmp.InPktsInvalid;
2795 sum->InPktsLate += tmp.InPktsLate;
2796 sum->InPktsNotValid += tmp.InPktsNotValid;
2797 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
2798 sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
c09440f7 2799 }
b62c3624 2800}
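
The fetch_begin/fetch_retry loop above is a sequence-counter read: copy the per-CPU counters, then retry if a writer updated them mid-copy. A minimal userspace analogue with C11 atomics (illustrative only; the kernel's u64_stats primitives additionally handle interrupt context and memory ordering):

	#include <stdatomic.h>

	struct seq_stats {
		atomic_uint seq;		/* writers keep this odd while updating */
		unsigned long long bytes;
	};

	static unsigned long long read_stable(struct seq_stats *s)
	{
		unsigned int start;
		unsigned long long v;

		do {
			do {
				start = atomic_load(&s->seq);
			} while (start & 1);	/* writer in progress: wait */
			v = s->bytes;		/* tentative snapshot */
		} while (atomic_load(&s->seq) != start);	/* raced: retry */

		return v;
	}
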
c09440f7 2801
b62c3624
DB
2802static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2803{
f60d94c0 2804 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
b62c3624 2805 sum->InOctetsValidated,
f60d94c0
ND
2806 MACSEC_RXSC_STATS_ATTR_PAD) ||
2807 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
b62c3624 2808 sum->InOctetsDecrypted,
f60d94c0
ND
2809 MACSEC_RXSC_STATS_ATTR_PAD) ||
2810 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
b62c3624 2811 sum->InPktsUnchecked,
f60d94c0
ND
2812 MACSEC_RXSC_STATS_ATTR_PAD) ||
2813 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
b62c3624 2814 sum->InPktsDelayed,
f60d94c0
ND
2815 MACSEC_RXSC_STATS_ATTR_PAD) ||
2816 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
b62c3624 2817 sum->InPktsOK,
f60d94c0
ND
2818 MACSEC_RXSC_STATS_ATTR_PAD) ||
2819 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
b62c3624 2820 sum->InPktsInvalid,
f60d94c0
ND
2821 MACSEC_RXSC_STATS_ATTR_PAD) ||
2822 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
b62c3624 2823 sum->InPktsLate,
f60d94c0
ND
2824 MACSEC_RXSC_STATS_ATTR_PAD) ||
2825 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
b62c3624 2826 sum->InPktsNotValid,
f60d94c0
ND
2827 MACSEC_RXSC_STATS_ATTR_PAD) ||
2828 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
b62c3624 2829 sum->InPktsNotUsingSA,
f60d94c0
ND
2830 MACSEC_RXSC_STATS_ATTR_PAD) ||
2831 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
b62c3624 2832 sum->InPktsUnusedSA,
f60d94c0 2833 MACSEC_RXSC_STATS_ATTR_PAD))
c09440f7
SD
2834 return -EMSGSIZE;
2835
2836 return 0;
2837}
2838
b62c3624
DB
2839static void get_tx_sc_stats(struct net_device *dev,
2840 struct macsec_tx_sc_stats *sum)
c09440f7 2841{
b62c3624 2842 struct macsec_dev *macsec = macsec_priv(dev);
c09440f7
SD
2843 int cpu;
2844
b62c3624
DB
2845 /* If h/w offloading is available, propagate to the device */
2846 if (macsec_is_offloaded(macsec)) {
2847 const struct macsec_ops *ops;
2848 struct macsec_context ctx;
2849
2850 ops = macsec_get_ops(macsec, &ctx);
2851 if (ops) {
2852 ctx.stats.tx_sc_stats = sum;
2853 ctx.secy = &macsec_priv(dev)->secy;
2854 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2855 }
2856 return;
2857 }
2858
c09440f7
SD
2859 for_each_possible_cpu(cpu) {
2860 const struct pcpu_tx_sc_stats *stats;
2861 struct macsec_tx_sc_stats tmp;
2862 unsigned int start;
2863
b62c3624 2864 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
c09440f7
SD
2865 do {
2866 start = u64_stats_fetch_begin_irq(&stats->syncp);
2867 memcpy(&tmp, &stats->stats, sizeof(tmp));
2868 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2869
b62c3624
DB
2870 sum->OutPktsProtected += tmp.OutPktsProtected;
2871 sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
2872 sum->OutOctetsProtected += tmp.OutOctetsProtected;
2873 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
c09440f7 2874 }
b62c3624 2875}
c09440f7 2876
b62c3624
DB
2877static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2878{
f60d94c0 2879 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
b62c3624 2880 sum->OutPktsProtected,
f60d94c0
ND
2881 MACSEC_TXSC_STATS_ATTR_PAD) ||
2882 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
b62c3624 2883 sum->OutPktsEncrypted,
f60d94c0
ND
2884 MACSEC_TXSC_STATS_ATTR_PAD) ||
2885 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
b62c3624 2886 sum->OutOctetsProtected,
f60d94c0
ND
2887 MACSEC_TXSC_STATS_ATTR_PAD) ||
2888 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
b62c3624 2889 sum->OutOctetsEncrypted,
f60d94c0 2890 MACSEC_TXSC_STATS_ATTR_PAD))
c09440f7
SD
2891 return -EMSGSIZE;
2892
2893 return 0;
2894}
2895
b62c3624 2896static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
c09440f7 2897{
b62c3624 2898 struct macsec_dev *macsec = macsec_priv(dev);
c09440f7
SD
2899 int cpu;
2900
b62c3624
DB
2901 /* If h/w offloading is available, propagate to the device */
2902 if (macsec_is_offloaded(macsec)) {
2903 const struct macsec_ops *ops;
2904 struct macsec_context ctx;
2905
2906 ops = macsec_get_ops(macsec, &ctx);
2907 if (ops) {
2908 ctx.stats.dev_stats = sum;
2909 ctx.secy = &macsec_priv(dev)->secy;
2910 macsec_offload(ops->mdo_get_dev_stats, &ctx);
2911 }
2912 return;
2913 }
2914
c09440f7
SD
2915 for_each_possible_cpu(cpu) {
2916 const struct pcpu_secy_stats *stats;
2917 struct macsec_dev_stats tmp;
2918 unsigned int start;
2919
b62c3624 2920 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
c09440f7
SD
2921 do {
2922 start = u64_stats_fetch_begin_irq(&stats->syncp);
2923 memcpy(&tmp, &stats->stats, sizeof(tmp));
2924 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2925
b62c3624
DB
2926 sum->OutPktsUntagged += tmp.OutPktsUntagged;
2927 sum->InPktsUntagged += tmp.InPktsUntagged;
2928 sum->OutPktsTooLong += tmp.OutPktsTooLong;
2929 sum->InPktsNoTag += tmp.InPktsNoTag;
2930 sum->InPktsBadTag += tmp.InPktsBadTag;
2931 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2932 sum->InPktsNoSCI += tmp.InPktsNoSCI;
2933 sum->InPktsOverrun += tmp.InPktsOverrun;
c09440f7 2934 }
b62c3624 2935}
c09440f7 2936
b62c3624
DB
2937static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2938{
f60d94c0 2939 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
b62c3624 2940 sum->OutPktsUntagged,
f60d94c0
ND
2941 MACSEC_SECY_STATS_ATTR_PAD) ||
2942 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
b62c3624 2943 sum->InPktsUntagged,
f60d94c0
ND
2944 MACSEC_SECY_STATS_ATTR_PAD) ||
2945 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
b62c3624 2946 sum->OutPktsTooLong,
f60d94c0
ND
2947 MACSEC_SECY_STATS_ATTR_PAD) ||
2948 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
b62c3624 2949 sum->InPktsNoTag,
f60d94c0
ND
2950 MACSEC_SECY_STATS_ATTR_PAD) ||
2951 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
b62c3624 2952 sum->InPktsBadTag,
f60d94c0
ND
2953 MACSEC_SECY_STATS_ATTR_PAD) ||
2954 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
b62c3624 2955 sum->InPktsUnknownSCI,
f60d94c0
ND
2956 MACSEC_SECY_STATS_ATTR_PAD) ||
2957 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
b62c3624 2958 sum->InPktsNoSCI,
f60d94c0
ND
2959 MACSEC_SECY_STATS_ATTR_PAD) ||
2960 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
b62c3624 2961 sum->InPktsOverrun,
f60d94c0 2962 MACSEC_SECY_STATS_ATTR_PAD))
c09440f7
SD
2963 return -EMSGSIZE;
2964
2965 return 0;
2966}
2967
2968static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2969{
2970 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
ae0be8de
MK
2971 struct nlattr *secy_nest = nla_nest_start_noflag(skb,
2972 MACSEC_ATTR_SECY);
ccfdec90 2973 u64 csid;
c09440f7
SD
2974
2975 if (!secy_nest)
2976 return 1;
2977
ccfdec90
FW
2978 switch (secy->key_len) {
2979 case MACSEC_GCM_AES_128_SAK_LEN:
48ef50fa 2980 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
ccfdec90
FW
2981 break;
2982 case MACSEC_GCM_AES_256_SAK_LEN:
48ef50fa 2983 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
ccfdec90
FW
2984 break;
2985 default:
2986 goto cancel;
2987 }
2988
f60d94c0
ND
2989 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2990 MACSEC_SECY_ATTR_PAD) ||
2991 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
ccfdec90 2992 csid, MACSEC_SECY_ATTR_PAD) ||
c09440f7
SD
2993 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2994 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2995 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2996 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
2997 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
2998 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
2999 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3000 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3001 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3002 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3003 goto cancel;
3004
3005 if (secy->replay_protect) {
3006 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3007 goto cancel;
3008 }
3009
3010 nla_nest_end(skb, secy_nest);
3011 return 0;
3012
3013cancel:
3014 nla_nest_cancel(skb, secy_nest);
3015 return 1;
3016}
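
The cipher suite ID reported to userspace is derived entirely from the SAK length plus the XPN flag; note that a 128-bit SAK without XPN is reported as MACSEC_DEFAULT_CIPHER_ID rather than MACSEC_CIPHER_ID_GCM_AES_128. The same mapping as a standalone sketch (constants from <linux/if_macsec.h>; SAK lengths assumed to be 16 and 32 bytes):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <linux/if_macsec.h>

	static uint64_t csid_for(size_t key_len, bool xpn)
	{
		switch (key_len) {
		case 16:	/* MACSEC_GCM_AES_128_SAK_LEN */
			return xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128
				   : MACSEC_DEFAULT_CIPHER_ID;
		case 32:	/* MACSEC_GCM_AES_256_SAK_LEN */
			return xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256
				   : MACSEC_CIPHER_ID_GCM_AES_256;
		default:
			return 0;	/* unknown: the caller cancels the nest */
		}
	}
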
3017
e1427237
FW
3018static noinline_for_stack int
3019dump_secy(struct macsec_secy *secy, struct net_device *dev,
3020 struct sk_buff *skb, struct netlink_callback *cb)
c09440f7 3021{
b62c3624
DB
3022 struct macsec_tx_sc_stats tx_sc_stats = {0, };
3023 struct macsec_tx_sa_stats tx_sa_stats = {0, };
3024 struct macsec_rx_sc_stats rx_sc_stats = {0, };
3025 struct macsec_rx_sa_stats rx_sa_stats = {0, };
dcb780fb 3026 struct macsec_dev *macsec = netdev_priv(dev);
b62c3624 3027 struct macsec_dev_stats dev_stats = {0, };
c09440f7
SD
3028 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3029 struct nlattr *txsa_list, *rxsc_list;
dcb780fb 3030 struct macsec_rx_sc *rx_sc;
c09440f7 3031 struct nlattr *attr;
dcb780fb
AT
3032 void *hdr;
3033 int i, j;
c09440f7
SD
3034
3035 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3036 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3037 if (!hdr)
3038 return -EMSGSIZE;
3039
0a833c29 3040 genl_dump_check_consistent(cb, hdr);
c09440f7
SD
3041
3042 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3043 goto nla_put_failure;
3044
dcb780fb
AT
3045 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3046 if (!attr)
3047 goto nla_put_failure;
3048 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3049 goto nla_put_failure;
3050 nla_nest_end(skb, attr);
3051
c09440f7
SD
3052 if (nla_put_secy(secy, skb))
3053 goto nla_put_failure;
3054
ae0be8de 3055 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
c09440f7
SD
3056 if (!attr)
3057 goto nla_put_failure;
b62c3624
DB
3058
3059 get_tx_sc_stats(dev, &tx_sc_stats);
3060 if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
c09440f7
SD
3061 nla_nest_cancel(skb, attr);
3062 goto nla_put_failure;
3063 }
3064 nla_nest_end(skb, attr);
3065
ae0be8de 3066 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
c09440f7
SD
3067 if (!attr)
3068 goto nla_put_failure;
b62c3624
DB
3069 get_secy_stats(dev, &dev_stats);
3070 if (copy_secy_stats(skb, &dev_stats)) {
c09440f7
SD
3071 nla_nest_cancel(skb, attr);
3072 goto nla_put_failure;
3073 }
3074 nla_nest_end(skb, attr);
3075
ae0be8de 3076 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
c09440f7
SD
3077 if (!txsa_list)
3078 goto nla_put_failure;
3079 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3080 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3081 struct nlattr *txsa_nest;
48ef50fa
EM
3082 u64 pn;
3083 int pn_len;
c09440f7
SD
3084
3085 if (!tx_sa)
3086 continue;
3087
ae0be8de 3088 txsa_nest = nla_nest_start_noflag(skb, j++);
c09440f7
SD
3089 if (!txsa_nest) {
3090 nla_nest_cancel(skb, txsa_list);
3091 goto nla_put_failure;
3092 }
3093
b62c3624
DB
3094 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3095 if (!attr) {
3096 nla_nest_cancel(skb, txsa_nest);
3097 nla_nest_cancel(skb, txsa_list);
3098 goto nla_put_failure;
3099 }
3100 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3101 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3102 if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3103 nla_nest_cancel(skb, attr);
3104 nla_nest_cancel(skb, txsa_nest);
3105 nla_nest_cancel(skb, txsa_list);
3106 goto nla_put_failure;
3107 }
3108 nla_nest_end(skb, attr);
3109
48ef50fa
EM
3110 if (secy->xpn) {
3111 pn = tx_sa->next_pn;
3112 pn_len = MACSEC_XPN_PN_LEN;
3113 } else {
3114 pn = tx_sa->next_pn_halves.lower;
3115 pn_len = MACSEC_DEFAULT_PN_LEN;
3116 }
3117
c09440f7 3118 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
48ef50fa 3119 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
8acca6ac 3120 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
48ef50fa 3121 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
c09440f7
SD
3122 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3123 nla_nest_cancel(skb, txsa_nest);
3124 nla_nest_cancel(skb, txsa_list);
3125 goto nla_put_failure;
3126 }
3127
c09440f7
SD
3128 nla_nest_end(skb, txsa_nest);
3129 }
3130 nla_nest_end(skb, txsa_list);
3131
ae0be8de 3132 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
c09440f7
SD
3133 if (!rxsc_list)
3134 goto nla_put_failure;
3135
3136 j = 1;
3137 for_each_rxsc_rtnl(secy, rx_sc) {
3138 int k;
3139 struct nlattr *rxsa_list;
ae0be8de 3140 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
c09440f7
SD
3141
3142 if (!rxsc_nest) {
3143 nla_nest_cancel(skb, rxsc_list);
3144 goto nla_put_failure;
3145 }
3146
3147 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
f60d94c0
ND
3148 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3149 MACSEC_RXSC_ATTR_PAD)) {
c09440f7
SD
3150 nla_nest_cancel(skb, rxsc_nest);
3151 nla_nest_cancel(skb, rxsc_list);
3152 goto nla_put_failure;
3153 }
3154
ae0be8de 3155 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
c09440f7
SD
3156 if (!attr) {
3157 nla_nest_cancel(skb, rxsc_nest);
3158 nla_nest_cancel(skb, rxsc_list);
3159 goto nla_put_failure;
3160 }
b62c3624
DB
3161 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3162 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3163 if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
c09440f7
SD
3164 nla_nest_cancel(skb, attr);
3165 nla_nest_cancel(skb, rxsc_nest);
3166 nla_nest_cancel(skb, rxsc_list);
3167 goto nla_put_failure;
3168 }
3169 nla_nest_end(skb, attr);
3170
ae0be8de
MK
3171 rxsa_list = nla_nest_start_noflag(skb,
3172 MACSEC_RXSC_ATTR_SA_LIST);
c09440f7
SD
3173 if (!rxsa_list) {
3174 nla_nest_cancel(skb, rxsc_nest);
3175 nla_nest_cancel(skb, rxsc_list);
3176 goto nla_put_failure;
3177 }
3178
3179 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3180 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3181 struct nlattr *rxsa_nest;
48ef50fa
EM
3182 u64 pn;
3183 int pn_len;
c09440f7
SD
3184
3185 if (!rx_sa)
3186 continue;
3187
ae0be8de 3188 rxsa_nest = nla_nest_start_noflag(skb, k++);
c09440f7
SD
3189 if (!rxsa_nest) {
3190 nla_nest_cancel(skb, rxsa_list);
3191 nla_nest_cancel(skb, rxsc_nest);
3192 nla_nest_cancel(skb, rxsc_list);
3193 goto nla_put_failure;
3194 }
3195
ae0be8de
MK
3196 attr = nla_nest_start_noflag(skb,
3197 MACSEC_SA_ATTR_STATS);
c09440f7
SD
3198 if (!attr) {
3199 nla_nest_cancel(skb, rxsa_list);
3200 nla_nest_cancel(skb, rxsc_nest);
3201 nla_nest_cancel(skb, rxsc_list);
3202 goto nla_put_failure;
3203 }
b62c3624
DB
3204 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3205 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3206 if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
c09440f7
SD
3207 nla_nest_cancel(skb, attr);
3208 nla_nest_cancel(skb, rxsa_list);
3209 nla_nest_cancel(skb, rxsc_nest);
3210 nla_nest_cancel(skb, rxsc_list);
3211 goto nla_put_failure;
3212 }
3213 nla_nest_end(skb, attr);
3214
48ef50fa
EM
3215 if (secy->xpn) {
3216 pn = rx_sa->next_pn;
3217 pn_len = MACSEC_XPN_PN_LEN;
3218 } else {
3219 pn = rx_sa->next_pn_halves.lower;
3220 pn_len = MACSEC_DEFAULT_PN_LEN;
3221 }
3222
c09440f7 3223 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
48ef50fa 3224 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
8acca6ac 3225 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
48ef50fa 3226 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
c09440f7
SD
3227 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3228 nla_nest_cancel(skb, rxsa_nest);
3229 nla_nest_cancel(skb, rxsc_nest);
3230 nla_nest_cancel(skb, rxsc_list);
3231 goto nla_put_failure;
3232 }
3233 nla_nest_end(skb, rxsa_nest);
3234 }
3235
3236 nla_nest_end(skb, rxsa_list);
3237 nla_nest_end(skb, rxsc_nest);
3238 }
3239
3240 nla_nest_end(skb, rxsc_list);
3241
c09440f7
SD
3242 genlmsg_end(skb, hdr);
3243
3244 return 0;
3245
3246nla_put_failure:
c09440f7
SD
3247 genlmsg_cancel(skb, hdr);
3248 return -EMSGSIZE;
3249}
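
Every error path in dump_secy() cancels the open nests before bailing out, so a partially built message is never emitted. Userspace message construction uses the same discipline; a sketch with libnl-3, mirroring the RXSC nest above (the nest type is the 1-based list index, as in the dump):

	#include <errno.h>
	#include <stdint.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <linux/if_macsec.h>

	static int put_rxsc(struct nl_msg *msg, int idx, uint64_t sci, uint8_t active)
	{
		struct nlattr *nest = nla_nest_start(msg, idx);

		if (!nest)
			return -ENOBUFS;
		if (nla_put_u64(msg, MACSEC_RXSC_ATTR_SCI, sci) ||
		    nla_put_u8(msg, MACSEC_RXSC_ATTR_ACTIVE, active)) {
			nla_nest_cancel(msg, nest);	/* unwind the partial nest */
			return -EMSGSIZE;
		}
		nla_nest_end(msg, nest);
		return 0;
	}
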
3250
96cfc505
SD
3251static int macsec_generation = 1; /* protected by RTNL */
3252
c09440f7
SD
3253static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3254{
3255 struct net *net = sock_net(skb->sk);
3256 struct net_device *dev;
3257 int dev_idx, d;
3258
3259 dev_idx = cb->args[0];
3260
3261 d = 0;
c10c63ea 3262 rtnl_lock();
96cfc505
SD
3263
3264 cb->seq = macsec_generation;
3265
c09440f7
SD
3266 for_each_netdev(net, dev) {
3267 struct macsec_secy *secy;
3268
3269 if (d < dev_idx)
3270 goto next;
3271
3272 if (!netif_is_macsec(dev))
3273 goto next;
3274
3275 secy = &macsec_priv(dev)->secy;
3276 if (dump_secy(secy, dev, skb, cb) < 0)
3277 goto done;
3278next:
3279 d++;
3280 }
3281
3282done:
c10c63ea 3283 rtnl_unlock();
c09440f7
SD
3284 cb->args[0] = d;
3285 return skb->len;
3286}
3287
66a9b928 3288static const struct genl_small_ops macsec_genl_ops[] = {
c09440f7
SD
3289 {
3290 .cmd = MACSEC_CMD_GET_TXSC,
ef6243ac 3291 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3292 .dumpit = macsec_dump_txsc,
c09440f7
SD
3293 },
3294 {
3295 .cmd = MACSEC_CMD_ADD_RXSC,
ef6243ac 3296 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3297 .doit = macsec_add_rxsc,
c09440f7
SD
3298 .flags = GENL_ADMIN_PERM,
3299 },
3300 {
3301 .cmd = MACSEC_CMD_DEL_RXSC,
ef6243ac 3302 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3303 .doit = macsec_del_rxsc,
c09440f7
SD
3304 .flags = GENL_ADMIN_PERM,
3305 },
3306 {
3307 .cmd = MACSEC_CMD_UPD_RXSC,
ef6243ac 3308 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3309 .doit = macsec_upd_rxsc,
c09440f7
SD
3310 .flags = GENL_ADMIN_PERM,
3311 },
3312 {
3313 .cmd = MACSEC_CMD_ADD_TXSA,
ef6243ac 3314 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3315 .doit = macsec_add_txsa,
c09440f7
SD
3316 .flags = GENL_ADMIN_PERM,
3317 },
3318 {
3319 .cmd = MACSEC_CMD_DEL_TXSA,
ef6243ac 3320 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3321 .doit = macsec_del_txsa,
c09440f7
SD
3322 .flags = GENL_ADMIN_PERM,
3323 },
3324 {
3325 .cmd = MACSEC_CMD_UPD_TXSA,
ef6243ac 3326 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3327 .doit = macsec_upd_txsa,
c09440f7
SD
3328 .flags = GENL_ADMIN_PERM,
3329 },
3330 {
3331 .cmd = MACSEC_CMD_ADD_RXSA,
ef6243ac 3332 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3333 .doit = macsec_add_rxsa,
c09440f7
SD
3334 .flags = GENL_ADMIN_PERM,
3335 },
3336 {
3337 .cmd = MACSEC_CMD_DEL_RXSA,
ef6243ac 3338 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3339 .doit = macsec_del_rxsa,
c09440f7
SD
3340 .flags = GENL_ADMIN_PERM,
3341 },
3342 {
3343 .cmd = MACSEC_CMD_UPD_RXSA,
ef6243ac 3344 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
c09440f7 3345 .doit = macsec_upd_rxsa,
c09440f7
SD
3346 .flags = GENL_ADMIN_PERM,
3347 },
dcb780fb
AT
3348 {
3349 .cmd = MACSEC_CMD_UPD_OFFLOAD,
3350 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3351 .doit = macsec_upd_offload,
3352 .flags = GENL_ADMIN_PERM,
3353 },
c09440f7
SD
3354};
3355
56989f6d 3356static struct genl_family macsec_fam __ro_after_init = {
489111e5
JB
3357 .name = MACSEC_GENL_NAME,
3358 .hdrsize = 0,
3359 .version = MACSEC_GENL_VERSION,
3360 .maxattr = MACSEC_ATTR_MAX,
3b0f31f2 3361 .policy = macsec_genl_policy,
489111e5
JB
3362 .netnsok = true,
3363 .module = THIS_MODULE,
66a9b928
JK
3364 .small_ops = macsec_genl_ops,
3365 .n_small_ops = ARRAY_SIZE(macsec_genl_ops),
489111e5
JB
3366};
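
With the family registered, any userspace client can resolve it by name and drive the ops table above. A minimal dump request that lands in macsec_dump_txsc(), assuming libnl-3-genl; parsing of the multipart reply is left out:

	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/if_macsec.h>

	static int dump_macsec(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int fam, err;

		genl_connect(sk);
		fam = genl_ctrl_resolve(sk, MACSEC_GENL_NAME);	/* "macsec" */

		err = genl_send_simple(sk, fam, MACSEC_CMD_GET_TXSC,
				       MACSEC_GENL_VERSION, NLM_F_DUMP);
		if (err >= 0)
			err = nl_recvmsgs_default(sk);	/* attach a custom cb to
							 * inspect the attributes */
		nl_socket_free(sk);
		return err;
	}
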
3367
c09440f7
SD
3368static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3369 struct net_device *dev)
3370{
3371 struct macsec_dev *macsec = netdev_priv(dev);
3372 struct macsec_secy *secy = &macsec->secy;
3373 struct pcpu_secy_stats *secy_stats;
3374 int ret, len;
3375
3cf3227a
AT
3376 if (macsec_is_offloaded(netdev_priv(dev))) {
3377 skb->dev = macsec->real_dev;
3378 return dev_queue_xmit(skb);
3379 }
3380
c09440f7
SD
3381 /* IEEE 802.1AE 10.5 */
3382 if (!secy->protect_frames) {
3383 secy_stats = this_cpu_ptr(macsec->stats);
3384 u64_stats_update_begin(&secy_stats->syncp);
3385 secy_stats->stats.OutPktsUntagged++;
3386 u64_stats_update_end(&secy_stats->syncp);
79c62220 3387 skb->dev = macsec->real_dev;
c09440f7
SD
3388 len = skb->len;
3389 ret = dev_queue_xmit(skb);
3390 count_tx(dev, ret, len);
3391 return ret;
3392 }
3393
3394 if (!secy->operational) {
3395 kfree_skb(skb);
3396 dev->stats.tx_dropped++;
3397 return NETDEV_TX_OK;
3398 }
3399
3400 skb = macsec_encrypt(skb, dev);
3401 if (IS_ERR(skb)) {
3402 if (PTR_ERR(skb) != -EINPROGRESS)
3403 dev->stats.tx_dropped++;
3404 return NETDEV_TX_OK;
3405 }
3406
3407 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3408
3409 macsec_encrypt_finish(skb, dev);
3410 len = skb->len;
3411 ret = dev_queue_xmit(skb);
3412 count_tx(dev, ret, len);
3413 return ret;
3414}
3415
c850240b 3416#define SW_MACSEC_FEATURES \
c09440f7 3417 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
e2003872 3418
c850240b
MS
3419/* If h/w offloading is enabled, use the real device's features, except for
3420 * VLAN_FEATURES - they require additional ops
3421 * HW_MACSEC - no reason to report it
3422 */
3423#define REAL_DEV_FEATURES(dev) \
3424 ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
3425
c09440f7
SD
3426static int macsec_dev_init(struct net_device *dev)
3427{
3428 struct macsec_dev *macsec = macsec_priv(dev);
3429 struct net_device *real_dev = macsec->real_dev;
5491e7c6 3430 int err;
c09440f7
SD
3431
3432 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3433 if (!dev->tstats)
3434 return -ENOMEM;
3435
5491e7c6
PA
3436 err = gro_cells_init(&macsec->gro_cells, dev);
3437 if (err) {
3438 free_percpu(dev->tstats);
3439 return err;
3440 }
3441
c850240b
MS
3442 if (macsec_is_offloaded(macsec)) {
3443 dev->features = REAL_DEV_FEATURES(real_dev);
3444 } else {
3445 dev->features = real_dev->features & SW_MACSEC_FEATURES;
3446 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3447 }
c09440f7
SD
3448
3449 dev->needed_headroom = real_dev->needed_headroom +
3450 MACSEC_NEEDED_HEADROOM;
3451 dev->needed_tailroom = real_dev->needed_tailroom +
3452 MACSEC_NEEDED_TAILROOM;
3453
3454 if (is_zero_ether_addr(dev->dev_addr))
3455 eth_hw_addr_inherit(dev, real_dev);
3456 if (is_zero_ether_addr(dev->broadcast))
3457 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3458
3459 return 0;
3460}
3461
3462static void macsec_dev_uninit(struct net_device *dev)
3463{
5491e7c6
PA
3464 struct macsec_dev *macsec = macsec_priv(dev);
3465
3466 gro_cells_destroy(&macsec->gro_cells);
c09440f7
SD
3467 free_percpu(dev->tstats);
3468}
3469
3470static netdev_features_t macsec_fix_features(struct net_device *dev,
3471 netdev_features_t features)
3472{
3473 struct macsec_dev *macsec = macsec_priv(dev);
3474 struct net_device *real_dev = macsec->real_dev;
3475
c850240b
MS
3476 if (macsec_is_offloaded(macsec))
3477 return REAL_DEV_FEATURES(real_dev);
3478
3479 features &= (real_dev->features & SW_MACSEC_FEATURES) |
5491e7c6
PA
3480 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3481 features |= NETIF_F_LLTX;
c09440f7
SD
3482
3483 return features;
3484}
3485
3486static int macsec_dev_open(struct net_device *dev)
3487{
3488 struct macsec_dev *macsec = macsec_priv(dev);
3489 struct net_device *real_dev = macsec->real_dev;
3490 int err;
3491
c09440f7
SD
3492 err = dev_uc_add(real_dev, dev->dev_addr);
3493 if (err < 0)
3494 return err;
3495
3496 if (dev->flags & IFF_ALLMULTI) {
3497 err = dev_set_allmulti(real_dev, 1);
3498 if (err < 0)
3499 goto del_unicast;
3500 }
3501
3502 if (dev->flags & IFF_PROMISC) {
3503 err = dev_set_promiscuity(real_dev, 1);
3504 if (err < 0)
3505 goto clear_allmulti;
3506 }
3507
3cf3227a
AT
3508 /* If h/w offloading is available, propagate to the device */
3509 if (macsec_is_offloaded(macsec)) {
3510 const struct macsec_ops *ops;
3511 struct macsec_context ctx;
3512
3513 ops = macsec_get_ops(netdev_priv(dev), &ctx);
3514 if (!ops) {
3515 err = -EOPNOTSUPP;
3516 goto clear_allmulti;
3517 }
3518
182879f8 3519 ctx.secy = &macsec->secy;
3cf3227a
AT
3520 err = macsec_offload(ops->mdo_dev_open, &ctx);
3521 if (err)
3522 goto clear_allmulti;
3523 }
3524
c09440f7
SD
3525 if (netif_carrier_ok(real_dev))
3526 netif_carrier_on(dev);
3527
3528 return 0;
3529clear_allmulti:
3530 if (dev->flags & IFF_ALLMULTI)
3531 dev_set_allmulti(real_dev, -1);
3532del_unicast:
3533 dev_uc_del(real_dev, dev->dev_addr);
3534 netif_carrier_off(dev);
3535 return err;
3536}
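
macsec_dev_open() uses the kernel's goto-unwind convention: each acquired resource has a label that releases it, taken in reverse order of acquisition. The shape in isolation (a generic sketch; the stubs stand in for dev_uc_add(), dev_set_allmulti() and the offload mdo_dev_open() call):

	static int acquire_a(void) { return 0; }	/* dev_uc_add() */
	static int acquire_b(void) { return 0; }	/* dev_set_allmulti() */
	static int acquire_c(void) { return 0; }	/* mdo_dev_open() */
	static void release_a(void) { }
	static void release_b(void) { }

	static int open_pattern(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;		/* nothing to undo yet */
		err = acquire_b();
		if (err)
			goto undo_a;
		err = acquire_c();
		if (err)
			goto undo_b;
		return 0;

	undo_b:
		release_b();
	undo_a:
		release_a();
		return err;
	}
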
3537
3538static int macsec_dev_stop(struct net_device *dev)
3539{
3540 struct macsec_dev *macsec = macsec_priv(dev);
3541 struct net_device *real_dev = macsec->real_dev;
3542
3543 netif_carrier_off(dev);
3544
3cf3227a
AT
3545 /* If h/w offloading is available, propagate to the device */
3546 if (macsec_is_offloaded(macsec)) {
3547 const struct macsec_ops *ops;
3548 struct macsec_context ctx;
3549
3550 ops = macsec_get_ops(macsec, &ctx);
182879f8
DB
3551 if (ops) {
3552 ctx.secy = &macsec->secy;
3cf3227a 3553 macsec_offload(ops->mdo_dev_stop, &ctx);
182879f8 3554 }
3cf3227a
AT
3555 }
3556
c09440f7
SD
3557 dev_mc_unsync(real_dev, dev);
3558 dev_uc_unsync(real_dev, dev);
3559
3560 if (dev->flags & IFF_ALLMULTI)
3561 dev_set_allmulti(real_dev, -1);
3562
3563 if (dev->flags & IFF_PROMISC)
3564 dev_set_promiscuity(real_dev, -1);
3565
3566 dev_uc_del(real_dev, dev->dev_addr);
3567
3568 return 0;
3569}
3570
3571static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3572{
3573 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3574
3575 if (!(dev->flags & IFF_UP))
3576 return;
3577
3578 if (change & IFF_ALLMULTI)
3579 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3580
3581 if (change & IFF_PROMISC)
3582 dev_set_promiscuity(real_dev,
3583 dev->flags & IFF_PROMISC ? 1 : -1);
3584}
3585
3586static void macsec_dev_set_rx_mode(struct net_device *dev)
3587{
3588 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3589
3590 dev_mc_sync(real_dev, dev);
3591 dev_uc_sync(real_dev, dev);
3592}
3593
3594static int macsec_set_mac_address(struct net_device *dev, void *p)
3595{
3596 struct macsec_dev *macsec = macsec_priv(dev);
3597 struct net_device *real_dev = macsec->real_dev;
3598 struct sockaddr *addr = p;
3599 int err;
3600
3601 if (!is_valid_ether_addr(addr->sa_data))
3602 return -EADDRNOTAVAIL;
3603
3604 if (!(dev->flags & IFF_UP))
3605 goto out;
3606
3607 err = dev_uc_add(real_dev, addr->sa_data);
3608 if (err < 0)
3609 return err;
3610
3611 dev_uc_del(real_dev, dev->dev_addr);
3612
3613out:
c49555ee 3614 eth_hw_addr_set(dev, addr->sa_data);
09f4136c
DB
3615
3616 /* If h/w offloading is available, propagate to the device */
3617 if (macsec_is_offloaded(macsec)) {
3618 const struct macsec_ops *ops;
3619 struct macsec_context ctx;
3620
3621 ops = macsec_get_ops(macsec, &ctx);
3622 if (ops) {
3623 ctx.secy = &macsec->secy;
3624 macsec_offload(ops->mdo_upd_secy, &ctx);
3625 }
3626 }
3627
c09440f7
SD
3628 return 0;
3629}
3630
3631static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3632{
3633 struct macsec_dev *macsec = macsec_priv(dev);
3634 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3635
3636 if (macsec->real_dev->mtu - extra < new_mtu)
3637 return -ERANGE;
3638
3639 dev->mtu = new_mtu;
3640
3641 return 0;
3642}
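
The check above keeps the MACsec overhead inside the lower device's MTU: SecTAG plus ICV must fit in every transmitted frame. With the default 16-byte ICV and a SecTAG carrying the 8-byte SCI that is 14 + 16 = 30 bytes. A sketch of the resulting MTU budget (macsec_change_mtu() always budgets for the SCI-bearing SecTAG, the worst case):

	#include <stdbool.h>

	static int max_macsec_mtu(int real_mtu, int icv_len, bool send_sci)
	{
		int sectag = send_sci ? 14 : 6;		/* SecTAG with/without SCI */

		return real_mtu - (sectag + icv_len);	/* e.g. 1500 - 30 = 1470 */
	}
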
3643
bc1f4470 3644static void macsec_get_stats64(struct net_device *dev,
3645 struct rtnl_link_stats64 *s)
c09440f7 3646{
c09440f7 3647 if (!dev->tstats)
bc1f4470 3648 return;
c09440f7 3649
9d015167 3650 dev_fetch_sw_netstats(s, dev->tstats);
c09440f7
SD
3651
3652 s->rx_dropped = dev->stats.rx_dropped;
3653 s->tx_dropped = dev->stats.tx_dropped;
c09440f7
SD
3654}
3655
3656static int macsec_get_iflink(const struct net_device *dev)
3657{
3658 return macsec_priv(dev)->real_dev->ifindex;
3659}
3660
3661static const struct net_device_ops macsec_netdev_ops = {
3662 .ndo_init = macsec_dev_init,
3663 .ndo_uninit = macsec_dev_uninit,
3664 .ndo_open = macsec_dev_open,
3665 .ndo_stop = macsec_dev_stop,
3666 .ndo_fix_features = macsec_fix_features,
3667 .ndo_change_mtu = macsec_change_mtu,
3668 .ndo_set_rx_mode = macsec_dev_set_rx_mode,
3669 .ndo_change_rx_flags = macsec_dev_change_rx_flags,
3670 .ndo_set_mac_address = macsec_set_mac_address,
3671 .ndo_start_xmit = macsec_start_xmit,
3672 .ndo_get_stats64 = macsec_get_stats64,
3673 .ndo_get_iflink = macsec_get_iflink,
3674};
3675
3676static const struct device_type macsec_type = {
3677 .name = "macsec",
3678};
3679
3680static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3681 [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
31d9a1c5 3682 [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
c09440f7
SD
3683 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3684 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3685 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3686 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3687 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3688 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3689 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3690 [IFLA_MACSEC_ES] = { .type = NLA_U8 },
3691 [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3692 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3693 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3694};
3695
3696static void macsec_free_netdev(struct net_device *dev)
3697{
3698 struct macsec_dev *macsec = macsec_priv(dev);
c09440f7
SD
3699
3700 free_percpu(macsec->stats);
3701 free_percpu(macsec->secy.tx_sc.stats);
3702
c09440f7
SD
3703}
3704
3705static void macsec_setup(struct net_device *dev)
3706{
3707 ether_setup(dev);
91572088
JW
3708 dev->min_mtu = 0;
3709 dev->max_mtu = ETH_MAX_MTU;
e425974f 3710 dev->priv_flags |= IFF_NO_QUEUE;
c09440f7 3711 dev->netdev_ops = &macsec_netdev_ops;
cf124db5
DM
3712 dev->needs_free_netdev = true;
3713 dev->priv_destructor = macsec_free_netdev;
c24acf03 3714 SET_NETDEV_DEVTYPE(dev, &macsec_type);
c09440f7
SD
3715
3716 eth_zero_addr(dev->broadcast);
3717}
3718
ccfdec90
FW
3719static int macsec_changelink_common(struct net_device *dev,
3720 struct nlattr *data[])
c09440f7
SD
3721{
3722 struct macsec_secy *secy;
3723 struct macsec_tx_sc *tx_sc;
3724
3725 secy = &macsec_priv(dev)->secy;
3726 tx_sc = &secy->tx_sc;
3727
3728 if (data[IFLA_MACSEC_ENCODING_SA]) {
3729 struct macsec_tx_sa *tx_sa;
3730
3731 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3732 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3733
3734 secy->operational = tx_sa && tx_sa->active;
3735 }
3736
c09440f7
SD
3737 if (data[IFLA_MACSEC_ENCRYPT])
3738 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3739
3740 if (data[IFLA_MACSEC_PROTECT])
3741 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3742
3743 if (data[IFLA_MACSEC_INC_SCI])
3744 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3745
3746 if (data[IFLA_MACSEC_ES])
3747 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3748
3749 if (data[IFLA_MACSEC_SCB])
3750 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3751
3752 if (data[IFLA_MACSEC_REPLAY_PROTECT])
3753 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3754
3755 if (data[IFLA_MACSEC_VALIDATION])
3756 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
ccfdec90
FW
3757
3758 if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3759 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3760 case MACSEC_CIPHER_ID_GCM_AES_128:
e8660ded 3761 case MACSEC_DEFAULT_CIPHER_ID:
ccfdec90 3762 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
48ef50fa 3763 secy->xpn = false;
ccfdec90
FW
3764 break;
3765 case MACSEC_CIPHER_ID_GCM_AES_256:
3766 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
48ef50fa
EM
3767 secy->xpn = false;
3768 break;
3769 case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3770 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3771 secy->xpn = true;
3772 break;
3773 case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3774 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3775 secy->xpn = true;
ccfdec90
FW
3776 break;
3777 default:
3778 return -EINVAL;
3779 }
3780 }
3781
0b52e10a
SD
3782 if (data[IFLA_MACSEC_WINDOW]) {
3783 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3784
3785 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3786 * for XPN cipher suites */
3787 if (secy->xpn &&
3788 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3789 return -EINVAL;
3790 }
3791
ccfdec90 3792 return 0;
c09440f7
SD
3793}
3794
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		/* no local ret here: shadowing the outer ret would make
		 * the cleanup path return the stale outer value (0)
		 * instead of the offload error
		 */
		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

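/* Drop all channels of a SecY: unchain and free every RX SC, and
 * detach and release each TX SA. Runs under the RTNL lock.
 */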
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

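/* Teardown shared by ->dellink and the NETDEV_UNREGISTER notifier:
 * tell the offload driver (if any) to delete the SecY, queue the
 * netdevice for unregistration, and unlink it from the lower device.
 */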
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

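/* ->dellink: common teardown, plus releasing the rx_handler and its
 * private data once the last MACsec device on this lower device is
 * gone.
 */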
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

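/* Attach a new SecY to its lower device. The rx_handler data is
 * allocated lazily: the first MACsec device on a given lower device
 * registers macsec_handle_frame, later ones just join the list.
 */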
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

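/* Allocate the per-CPU statistics and initialize the SecY defaults:
 * operational, frames protected, default (strict) validation, replay
 * protection off. An undefined SCI is derived from the device MAC
 * address and the end-station port.
 */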
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

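/* ->newlink: create a MACsec device stacked on the IFLA_LINK device,
 * e.g. (illustrative iproute2 invocation; exact flags depend on the
 * iproute2 version):
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *
 * Errors unwind in reverse order through the del_dev/unlink/unregister
 * labels at the bottom.
 */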
static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	/* send_sci must be set to true when the transmit SCI is explicitly set */
	if (data && data[IFLA_MACSEC_SCI] && data[IFLA_MACSEC_INC_SCI]) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

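/* ->validate: static sanity checks on the attributes before any state
 * is touched. A non-default ICV length is probed by allocating a
 * throwaway AEAD transform, since the acceptable range depends on the
 * crypto implementation.
 */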
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

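/* ->fill_info: the cipher suite ID is not stored as such; it is
 * reconstructed from the SAK length and the XPN flag. IFLA_MACSEC_WINDOW
 * is only reported while replay protection is on.
 */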
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

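/* A net_device is a MACsec "master" (lower device) iff its rx_handler
 * is ours; this is what the notifier below keys on.
 */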
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

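/* Lower-device notifier: mirror operstate changes onto the stacked
 * MACsec devices, shrink their MTU when the lower MTU no longer leaves
 * room for the SecTAG + ICV overhead, and tear everything down when
 * the lower device unregisters.
 */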
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

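/* Register in dependency order: netdevice notifier, rtnl_link ops,
 * then the generic netlink family; failures unwind in reverse.
 */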
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

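/* Unwind macsec_init() in reverse, then wait out in-flight RCU
 * callbacks (rcu_barrier()) so none can run after the module text is
 * gone.
 */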
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");