1 /*
2 * drivers/net/macsec.c - MACsec device
3 *
4 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #include <linux/types.h>
13 #include <linux/skbuff.h>
14 #include <linux/socket.h>
15 #include <linux/module.h>
16 #include <crypto/aead.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rtnetlink.h>
19 #include <net/genetlink.h>
20 #include <net/sock.h>
21 #include <net/gro_cells.h>
22
23 #include <uapi/linux/if_macsec.h>
24
25 typedef u64 __bitwise sci_t;
26
27 #define MACSEC_SCI_LEN 8
28
29 /* SecTAG length = macsec_eth_header without the ethernet header and the optional SCI */
30 #define MACSEC_TAG_LEN 6
31
32 struct macsec_eth_header {
33 struct ethhdr eth;
34 /* SecTAG */
35 u8 tci_an;
36 #if defined(__LITTLE_ENDIAN_BITFIELD)
37 u8 short_length:6,
38 unused:2;
39 #elif defined(__BIG_ENDIAN_BITFIELD)
40 u8 unused:2,
41 short_length:6;
42 #else
43 #error "Please fix <asm/byteorder.h>"
44 #endif
45 __be32 packet_number;
46 u8 secure_channel_id[8]; /* optional */
47 } __packed;
48
49 #define MACSEC_TCI_VERSION 0x80
50 #define MACSEC_TCI_ES 0x40 /* end station */
51 #define MACSEC_TCI_SC 0x20 /* SCI present */
52 #define MACSEC_TCI_SCB 0x10 /* single copy broadcast (EPON) */
53 #define MACSEC_TCI_E 0x08 /* encryption */
54 #define MACSEC_TCI_C 0x04 /* changed text */
55 #define MACSEC_AN_MASK 0x03 /* association number */
56 #define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
57
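/*
 * Example: decoding the TCI/AN octet of a received SecTAG under the
 * masks above.  This is an illustrative sketch only; the real parsing
 * lives in macsec_validate_skb() and macsec_handle_frame() below.
 *
 *	bool sci_present = hdr->tci_an & MACSEC_TCI_SC;
 *	bool encrypted = (hdr->tci_an & MACSEC_TCI_CONFID) ==
 *			 MACSEC_TCI_CONFID;
 *	u8 an = hdr->tci_an & MACSEC_AN_MASK;	// association number, 0..3
 *
 * A tci_an of 0x2c (MACSEC_TCI_SC | MACSEC_TCI_E | MACSEC_TCI_C) thus
 * means: SCI present, payload encrypted, association number 0.
 */
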
58 /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
59 #define MIN_NON_SHORT_LEN 48
60
61 #define GCM_AES_IV_LEN 12
62 #define DEFAULT_ICV_LEN 16
63
64 #define MACSEC_NUM_AN 4 /* 2 bits for the association number */
65
66 #define for_each_rxsc(secy, sc) \
67 for (sc = rcu_dereference_bh(secy->rx_sc); \
68 sc; \
69 sc = rcu_dereference_bh(sc->next))
70 #define for_each_rxsc_rtnl(secy, sc) \
71 for (sc = rtnl_dereference(secy->rx_sc); \
72 sc; \
73 sc = rtnl_dereference(sc->next))
74
75 struct gcm_iv {
76 union {
77 u8 secure_channel_id[8];
78 sci_t sci;
79 };
80 __be32 pn;
81 };
82
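/*
 * Note: the 96-bit GCM IV is simply the 8-byte SCI followed by the
 * 4-byte packet number in network byte order, so sizeof(struct gcm_iv)
 * matches GCM_AES_IV_LEN (12).  See macsec_fill_iv() below.
 */
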
83 /**
84 * struct macsec_key - SA key
85 * @id: user-provided key identifier
86 * @tfm: crypto struct, key storage
87 */
88 struct macsec_key {
89 u8 id[MACSEC_KEYID_LEN];
90 struct crypto_aead *tfm;
91 };
92
93 struct macsec_rx_sc_stats {
94 __u64 InOctetsValidated;
95 __u64 InOctetsDecrypted;
96 __u64 InPktsUnchecked;
97 __u64 InPktsDelayed;
98 __u64 InPktsOK;
99 __u64 InPktsInvalid;
100 __u64 InPktsLate;
101 __u64 InPktsNotValid;
102 __u64 InPktsNotUsingSA;
103 __u64 InPktsUnusedSA;
104 };
105
106 struct macsec_rx_sa_stats {
107 __u32 InPktsOK;
108 __u32 InPktsInvalid;
109 __u32 InPktsNotValid;
110 __u32 InPktsNotUsingSA;
111 __u32 InPktsUnusedSA;
112 };
113
114 struct macsec_tx_sa_stats {
115 __u32 OutPktsProtected;
116 __u32 OutPktsEncrypted;
117 };
118
119 struct macsec_tx_sc_stats {
120 __u64 OutPktsProtected;
121 __u64 OutPktsEncrypted;
122 __u64 OutOctetsProtected;
123 __u64 OutOctetsEncrypted;
124 };
125
126 struct macsec_dev_stats {
127 __u64 OutPktsUntagged;
128 __u64 InPktsUntagged;
129 __u64 OutPktsTooLong;
130 __u64 InPktsNoTag;
131 __u64 InPktsBadTag;
132 __u64 InPktsUnknownSCI;
133 __u64 InPktsNoSCI;
134 __u64 InPktsOverrun;
135 };
136
137 /**
138 * struct macsec_rx_sa - receive secure association
139  * @active: true if this SA may currently be used for reception
140 * @next_pn: packet number expected for the next packet
141 * @lock: protects next_pn manipulations
142 * @key: key structure
143 * @stats: per-SA stats
144 */
145 struct macsec_rx_sa {
146 struct macsec_key key;
147 spinlock_t lock;
148 u32 next_pn;
149 atomic_t refcnt;
150 bool active;
151 struct macsec_rx_sa_stats __percpu *stats;
152 struct macsec_rx_sc *sc;
153 struct rcu_head rcu;
154 };
155
156 struct pcpu_rx_sc_stats {
157 struct macsec_rx_sc_stats stats;
158 struct u64_stats_sync syncp;
159 };
160
161 /**
162 * struct macsec_rx_sc - receive secure channel
163 * @sci: secure channel identifier for this SC
164 * @active: channel is active
165 * @sa: array of secure associations
166 * @stats: per-SC stats
167 */
168 struct macsec_rx_sc {
169 struct macsec_rx_sc __rcu *next;
170 sci_t sci;
171 bool active;
172 struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
173 struct pcpu_rx_sc_stats __percpu *stats;
174 atomic_t refcnt;
175 struct rcu_head rcu_head;
176 };
177
178 /**
179 * struct macsec_tx_sa - transmit secure association
180  * @active: true if this SA may currently be used for transmission
181 * @next_pn: packet number to use for the next packet
182 * @lock: protects next_pn manipulations
183 * @key: key structure
184 * @stats: per-SA stats
185 */
186 struct macsec_tx_sa {
187 struct macsec_key key;
188 spinlock_t lock;
189 u32 next_pn;
190 atomic_t refcnt;
191 bool active;
192 struct macsec_tx_sa_stats __percpu *stats;
193 struct rcu_head rcu;
194 };
195
196 struct pcpu_tx_sc_stats {
197 struct macsec_tx_sc_stats stats;
198 struct u64_stats_sync syncp;
199 };
200
201 /**
202 * struct macsec_tx_sc - transmit secure channel
203  * @active: true if this SC is in use
204 * @encoding_sa: association number of the SA currently in use
205 * @encrypt: encrypt packets on transmit, or authenticate only
206 * @send_sci: always include the SCI in the SecTAG
207  * @end_station: set the ES bit in the SecTAG when the SCI is omitted
208 * @scb: single copy broadcast flag
209 * @sa: array of secure associations
210 * @stats: stats for this TXSC
211 */
212 struct macsec_tx_sc {
213 bool active;
214 u8 encoding_sa;
215 bool encrypt;
216 bool send_sci;
217 bool end_station;
218 bool scb;
219 struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
220 struct pcpu_tx_sc_stats __percpu *stats;
221 };
222
223 #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
224
225 /**
226 * struct macsec_secy - MACsec Security Entity
227 * @netdev: netdevice for this SecY
228 * @n_rx_sc: number of receive secure channels configured on this SecY
229 * @sci: secure channel identifier used for tx
230 * @key_len: length of keys used by the cipher suite
231 * @icv_len: length of ICV used by the cipher suite
232 * @validate_frames: validation mode
233 * @operational: MAC_Operational flag
234 * @protect_frames: enable protection for this SecY
235 * @replay_protect: enable packet number checks on receive
236 * @replay_window: size of the replay window
237 * @tx_sc: transmit secure channel
238 * @rx_sc: linked list of receive secure channels
239 */
240 struct macsec_secy {
241 struct net_device *netdev;
242 unsigned int n_rx_sc;
243 sci_t sci;
244 u16 key_len;
245 u16 icv_len;
246 enum macsec_validation_type validate_frames;
247 bool operational;
248 bool protect_frames;
249 bool replay_protect;
250 u32 replay_window;
251 struct macsec_tx_sc tx_sc;
252 struct macsec_rx_sc __rcu *rx_sc;
253 };
254
255 struct pcpu_secy_stats {
256 struct macsec_dev_stats stats;
257 struct u64_stats_sync syncp;
258 };
259
260 /**
261 * struct macsec_dev - private data
262 * @secy: SecY config
263 * @real_dev: pointer to underlying netdevice
264 * @stats: MACsec device stats
265 * @secys: linked list of SecY's on the underlying device
266 */
267 struct macsec_dev {
268 struct macsec_secy secy;
269 struct net_device *real_dev;
270 struct pcpu_secy_stats __percpu *stats;
271 struct list_head secys;
272 struct gro_cells gro_cells;
273 unsigned int nest_level;
274 };
275
276 /**
277 * struct macsec_rxh_data - rx_handler private argument
278 * @secys: linked list of SecY's on this underlying device
279 */
280 struct macsec_rxh_data {
281 struct list_head secys;
282 };
283
284 static struct macsec_dev *macsec_priv(const struct net_device *dev)
285 {
286 return (struct macsec_dev *)netdev_priv(dev);
287 }
288
289 static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
290 {
291 return rcu_dereference_bh(dev->rx_handler_data);
292 }
293
294 static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
295 {
296 return rtnl_dereference(dev->rx_handler_data);
297 }
298
299 struct macsec_cb {
300 struct aead_request *req;
301 union {
302 struct macsec_tx_sa *tx_sa;
303 struct macsec_rx_sa *rx_sa;
304 };
305 u8 assoc_num;
306 bool valid;
307 bool has_sci;
308 };
309
310 static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
311 {
312 struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
313
314 if (!sa || !sa->active)
315 return NULL;
316
317 if (!atomic_inc_not_zero(&sa->refcnt))
318 return NULL;
319
320 return sa;
321 }
322
323 static void free_rx_sc_rcu(struct rcu_head *head)
324 {
325 struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
326
327 free_percpu(rx_sc->stats);
328 kfree(rx_sc);
329 }
330
331 static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
332 {
333 return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
334 }
335
336 static void macsec_rxsc_put(struct macsec_rx_sc *sc)
337 {
338 if (atomic_dec_and_test(&sc->refcnt))
339 call_rcu(&sc->rcu_head, free_rx_sc_rcu);
340 }
341
342 static void free_rxsa(struct rcu_head *head)
343 {
344 struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
345
346 crypto_free_aead(sa->key.tfm);
347 free_percpu(sa->stats);
348 kfree(sa);
349 }
350
351 static void macsec_rxsa_put(struct macsec_rx_sa *sa)
352 {
353 if (atomic_dec_and_test(&sa->refcnt))
354 call_rcu(&sa->rcu, free_rxsa);
355 }
356
357 static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
358 {
359 struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
360
361 if (!sa || !sa->active)
362 return NULL;
363
364 if (!atomic_inc_not_zero(&sa->refcnt))
365 return NULL;
366
367 return sa;
368 }
369
370 static void free_txsa(struct rcu_head *head)
371 {
372 struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
373
374 crypto_free_aead(sa->key.tfm);
375 free_percpu(sa->stats);
376 kfree(sa);
377 }
378
379 static void macsec_txsa_put(struct macsec_tx_sa *sa)
380 {
381 if (atomic_dec_and_test(&sa->refcnt))
382 call_rcu(&sa->rcu, free_txsa);
383 }
384
385 static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
386 {
387 BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
388 return (struct macsec_cb *)skb->cb;
389 }
390
391 #define MACSEC_PORT_ES (htons(0x0001))
392 #define MACSEC_PORT_SCB (0x0000)
393 #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
394
395 #define DEFAULT_SAK_LEN 16
396 #define DEFAULT_SEND_SCI true
397 #define DEFAULT_ENCRYPT false
398 #define DEFAULT_ENCODING_SA 0
399
400 static bool send_sci(const struct macsec_secy *secy)
401 {
402 const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
403
404 return tx_sc->send_sci ||
405 (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
406 }
407
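/*
 * In other words, the SCI only goes on the wire when the peer cannot
 * infer it: with a single receive SC the sender's MAC address plus the
 * end-station port is enough (see macsec_frame_sci() below), so
 * send_sci() forces the SCI only when explicitly configured or when
 * several RX SCs exist and neither ES nor SCB applies.
 */
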
408 static sci_t make_sci(u8 *addr, __be16 port)
409 {
410 sci_t sci;
411
412 memcpy(&sci, addr, ETH_ALEN);
413 memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
414
415 return sci;
416 }
417
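/*
 * Example (illustrative values only): for a station with MAC address
 * 52:54:00:12:34:56 using the end-station port MACSEC_PORT_ES,
 * make_sci() produces the on-wire SCI 52:54:00:12:34:56:00:01,
 * i.e. the MAC address followed by port 1 in network byte order.
 */
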
418 static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
419 {
420 sci_t sci;
421
422 if (sci_present)
423 memcpy(&sci, hdr->secure_channel_id,
424 sizeof(hdr->secure_channel_id));
425 else
426 sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
427
428 return sci;
429 }
430
431 static unsigned int macsec_sectag_len(bool sci_present)
432 {
433 return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
434 }
435
436 static unsigned int macsec_hdr_len(bool sci_present)
437 {
438 return macsec_sectag_len(sci_present) + ETH_HLEN;
439 }
440
441 static unsigned int macsec_extra_len(bool sci_present)
442 {
443 return macsec_sectag_len(sci_present) + sizeof(__be16);
444 }
445
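/*
 * Worked example of the three length helpers, with the SCI present:
 * macsec_sectag_len() = 6 + 8 = 14, macsec_hdr_len() = 14 + ETH_HLEN =
 * 28 octets of authenticated header, and macsec_extra_len() = 14 + 2 =
 * 16 octets of growth over the original frame: the SecTAG plus the
 * 2-byte MACsec EtherType inserted in front of it, while the original
 * EtherType stays in place as the first octets of the secure data.
 */
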
446 /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
447 static void macsec_fill_sectag(struct macsec_eth_header *h,
448 const struct macsec_secy *secy, u32 pn,
449 bool sci_present)
450 {
451 const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
452
453 memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
454 h->eth.h_proto = htons(ETH_P_MACSEC);
455
456 if (sci_present) {
457 h->tci_an |= MACSEC_TCI_SC;
458 memcpy(&h->secure_channel_id, &secy->sci,
459 sizeof(h->secure_channel_id));
460 } else {
461 if (tx_sc->end_station)
462 h->tci_an |= MACSEC_TCI_ES;
463 if (tx_sc->scb)
464 h->tci_an |= MACSEC_TCI_SCB;
465 }
466
467 h->packet_number = htonl(pn);
468
469 /* with GCM, C/E clear for !encrypt, both set for encrypt */
470 if (tx_sc->encrypt)
471 h->tci_an |= MACSEC_TCI_CONFID;
472 else if (secy->icv_len != DEFAULT_ICV_LEN)
473 h->tci_an |= MACSEC_TCI_C;
474
475 h->tci_an |= tx_sc->encoding_sa;
476 }
477
478 static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
479 {
480 if (data_len < MIN_NON_SHORT_LEN)
481 h->short_length = data_len;
482 }
483
484 /* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
485 static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
486 {
487 struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
488 int len = skb->len - 2 * ETH_ALEN;
489 int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
490
491 /* a) It comprises at least 17 octets */
492 if (skb->len <= 16)
493 return false;
494
495 /* b) MACsec EtherType: already checked */
496
497 /* c) V bit is clear */
498 if (h->tci_an & MACSEC_TCI_VERSION)
499 return false;
500
501 /* d) ES or SCB => !SC */
502 if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
503 (h->tci_an & MACSEC_TCI_SC))
504 return false;
505
506 /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
507 if (h->unused)
508 return false;
509
510 /* rx.pn != 0 (figure 10-5) */
511 if (!h->packet_number)
512 return false;
513
514 /* length check, f) g) h) i) */
515 if (h->short_length)
516 return len == extra_len + h->short_length;
517 return len >= extra_len + MIN_NON_SHORT_LEN;
518 }
519
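/*
 * Example of the length checks above, assuming the SCI bit set and the
 * default 16-byte ICV: extra_len = 16 + 16 = 32.  A frame carrying 20
 * octets of secure data (the original EtherType plus payload) must
 * then have short_length == 20 and len == 32 + 20 = 52, while a frame
 * with 48 or more octets of secure data must have short_length == 0
 * and len >= 32 + 48 = 80.  Illustrative numbers only.
 */
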
520 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
521 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
522
523 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
524 {
525 struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
526
527 gcm_iv->sci = sci;
528 gcm_iv->pn = htonl(pn);
529 }
530
531 static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
532 {
533 return (struct macsec_eth_header *)skb_mac_header(skb);
534 }
535
536 static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
537 {
538 u32 pn;
539
540 spin_lock_bh(&tx_sa->lock);
541 pn = tx_sa->next_pn;
542
543 tx_sa->next_pn++;
544 if (tx_sa->next_pn == 0) {
545 pr_debug("PN wrapped, transitioning to !oper\n");
546 tx_sa->active = false;
547 if (secy->protect_frames)
548 secy->operational = false;
549 }
550 spin_unlock_bh(&tx_sa->lock);
551
552 return pn;
553 }
554
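/*
 * Note that the post-increment above means the last usable PN is
 * 0xffffffff: once next_pn wraps to 0 the SA is retired and, if
 * protection is required, the whole SecY goes !operational until
 * userspace installs a fresh SA.  macsec_encrypt() below treats a
 * returned PN of 0 as -ENOLINK.
 */
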
555 static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
556 {
557 struct macsec_dev *macsec = netdev_priv(dev);
558
559 skb->dev = macsec->real_dev;
560 skb_reset_mac_header(skb);
561 skb->protocol = eth_hdr(skb)->h_proto;
562 }
563
564 static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
565 struct macsec_tx_sa *tx_sa)
566 {
567 struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
568
569 u64_stats_update_begin(&txsc_stats->syncp);
570 if (tx_sc->encrypt) {
571 txsc_stats->stats.OutOctetsEncrypted += skb->len;
572 txsc_stats->stats.OutPktsEncrypted++;
573 this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
574 } else {
575 txsc_stats->stats.OutOctetsProtected += skb->len;
576 txsc_stats->stats.OutPktsProtected++;
577 this_cpu_inc(tx_sa->stats->OutPktsProtected);
578 }
579 u64_stats_update_end(&txsc_stats->syncp);
580 }
581
582 static void count_tx(struct net_device *dev, int ret, int len)
583 {
584 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
585 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
586
587 u64_stats_update_begin(&stats->syncp);
588 stats->tx_packets++;
589 stats->tx_bytes += len;
590 u64_stats_update_end(&stats->syncp);
591 } else {
592 dev->stats.tx_dropped++;
593 }
594 }
595
596 static void macsec_encrypt_done(struct crypto_async_request *base, int err)
597 {
598 struct sk_buff *skb = base->data;
599 struct net_device *dev = skb->dev;
600 struct macsec_dev *macsec = macsec_priv(dev);
601 struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
602 int len, ret;
603
604 aead_request_free(macsec_skb_cb(skb)->req);
605
606 rcu_read_lock_bh();
607 macsec_encrypt_finish(skb, dev);
608 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
609 len = skb->len;
610 ret = dev_queue_xmit(skb);
611 count_tx(dev, ret, len);
612 rcu_read_unlock_bh();
613
614 macsec_txsa_put(sa);
615 dev_put(dev);
616 }
617
618 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
619 unsigned char **iv,
620 struct scatterlist **sg,
621 int num_frags)
622 {
623 size_t size, iv_offset, sg_offset;
624 struct aead_request *req;
625 void *tmp;
626
627 size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
628 iv_offset = size;
629 size += GCM_AES_IV_LEN;
630
631 size = ALIGN(size, __alignof__(struct scatterlist));
632 sg_offset = size;
633 size += sizeof(struct scatterlist) * num_frags;
634
635 tmp = kmalloc(size, GFP_ATOMIC);
636 if (!tmp)
637 return NULL;
638
639 *iv = (unsigned char *)(tmp + iv_offset);
640 *sg = (struct scatterlist *)(tmp + sg_offset);
641 req = tmp;
642
643 aead_request_set_tfm(req, tfm);
644
645 return req;
646 }
647
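/*
 * Sketch of the single allocation made by macsec_alloc_req():
 *
 *	tmp
 *	+------------------------------------+
 *	| struct aead_request + tfm context  |
 *	+------------------------------------+  tmp + iv_offset
 *	| GCM_AES_IV_LEN (12) bytes of IV    |
 *	+------------------------------------+  ALIGNed to scatterlist
 *	| num_frags * struct scatterlist     |
 *	+------------------------------------+
 *
 * Packing the request, IV and scatterlist into one GFP_ATOMIC
 * allocation keeps the hot path to a single kmalloc()/kfree() pair
 * per skb.
 */
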
648 static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
649 struct net_device *dev)
650 {
651 int ret;
652 struct scatterlist *sg;
653 struct sk_buff *trailer;
654 unsigned char *iv;
655 struct ethhdr *eth;
656 struct macsec_eth_header *hh;
657 size_t unprotected_len;
658 struct aead_request *req;
659 struct macsec_secy *secy;
660 struct macsec_tx_sc *tx_sc;
661 struct macsec_tx_sa *tx_sa;
662 struct macsec_dev *macsec = macsec_priv(dev);
663 bool sci_present;
664 u32 pn;
665
666 secy = &macsec->secy;
667 tx_sc = &secy->tx_sc;
668
669 /* 10.5.1 TX SA assignment */
670 tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
671 if (!tx_sa) {
672 secy->operational = false;
673 kfree_skb(skb);
674 return ERR_PTR(-EINVAL);
675 }
676
677 if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
678 skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
679 struct sk_buff *nskb = skb_copy_expand(skb,
680 MACSEC_NEEDED_HEADROOM,
681 MACSEC_NEEDED_TAILROOM,
682 GFP_ATOMIC);
683 if (likely(nskb)) {
684 consume_skb(skb);
685 skb = nskb;
686 } else {
687 macsec_txsa_put(tx_sa);
688 kfree_skb(skb);
689 return ERR_PTR(-ENOMEM);
690 }
691 } else {
692 skb = skb_unshare(skb, GFP_ATOMIC);
693 if (!skb) {
694 macsec_txsa_put(tx_sa);
695 return ERR_PTR(-ENOMEM);
696 }
697 }
698
699 unprotected_len = skb->len;
700 eth = eth_hdr(skb);
701 sci_present = send_sci(secy);
702 hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
703 memmove(hh, eth, 2 * ETH_ALEN);
704
705 pn = tx_sa_update_pn(tx_sa, secy);
706 if (pn == 0) {
707 macsec_txsa_put(tx_sa);
708 kfree_skb(skb);
709 return ERR_PTR(-ENOLINK);
710 }
711 macsec_fill_sectag(hh, secy, pn, sci_present);
712 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
713
714 skb_put(skb, secy->icv_len);
715
716         if (skb->len - ETH_HLEN > macsec->real_dev->mtu) {
717 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
718
719 u64_stats_update_begin(&secy_stats->syncp);
720 secy_stats->stats.OutPktsTooLong++;
721 u64_stats_update_end(&secy_stats->syncp);
722
723 macsec_txsa_put(tx_sa);
724 kfree_skb(skb);
725 return ERR_PTR(-EINVAL);
726 }
727
728 ret = skb_cow_data(skb, 0, &trailer);
729 if (unlikely(ret < 0)) {
730 macsec_txsa_put(tx_sa);
731 kfree_skb(skb);
732 return ERR_PTR(ret);
733 }
734
735 req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
736 if (!req) {
737 macsec_txsa_put(tx_sa);
738 kfree_skb(skb);
739 return ERR_PTR(-ENOMEM);
740 }
741
742 macsec_fill_iv(iv, secy->sci, pn);
743
744 sg_init_table(sg, ret);
745 skb_to_sgvec(skb, sg, 0, skb->len);
746
747 if (tx_sc->encrypt) {
748 int len = skb->len - macsec_hdr_len(sci_present) -
749 secy->icv_len;
750 aead_request_set_crypt(req, sg, sg, len, iv);
751 aead_request_set_ad(req, macsec_hdr_len(sci_present));
752 } else {
753 aead_request_set_crypt(req, sg, sg, 0, iv);
754 aead_request_set_ad(req, skb->len - secy->icv_len);
755 }
756
757 macsec_skb_cb(skb)->req = req;
758 macsec_skb_cb(skb)->tx_sa = tx_sa;
759 aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
760
761 dev_hold(skb->dev);
762 ret = crypto_aead_encrypt(req);
763 if (ret == -EINPROGRESS) {
764 return ERR_PTR(ret);
765 } else if (ret != 0) {
766 dev_put(skb->dev);
767 kfree_skb(skb);
768 aead_request_free(req);
769 macsec_txsa_put(tx_sa);
770 return ERR_PTR(-EINVAL);
771 }
772
773 dev_put(skb->dev);
774 aead_request_free(req);
775 macsec_txsa_put(tx_sa);
776
777 return skb;
778 }
779
780 static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
781 {
782 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
783 struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
784 struct macsec_eth_header *hdr = macsec_ethhdr(skb);
785 u32 lowest_pn = 0;
786
787 spin_lock(&rx_sa->lock);
788 if (rx_sa->next_pn >= secy->replay_window)
789 lowest_pn = rx_sa->next_pn - secy->replay_window;
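	/* Example: next_pn = 1000, replay_window = 100 gives
	 * lowest_pn = 900.  With replay_protect on, any PN below 900 is
	 * dropped as InPktsLate below; with it off, such packets are
	 * only counted as InPktsDelayed.
	 */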
790
791 /* Now perform replay protection check again
792 * (see IEEE 802.1AE-2006 figure 10-5)
793 */
794 if (secy->replay_protect && pn < lowest_pn) {
795 spin_unlock(&rx_sa->lock);
796 u64_stats_update_begin(&rxsc_stats->syncp);
797 rxsc_stats->stats.InPktsLate++;
798 u64_stats_update_end(&rxsc_stats->syncp);
799 return false;
800 }
801
802 if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
803 u64_stats_update_begin(&rxsc_stats->syncp);
804 if (hdr->tci_an & MACSEC_TCI_E)
805 rxsc_stats->stats.InOctetsDecrypted += skb->len;
806 else
807 rxsc_stats->stats.InOctetsValidated += skb->len;
808 u64_stats_update_end(&rxsc_stats->syncp);
809 }
810
811 if (!macsec_skb_cb(skb)->valid) {
812 spin_unlock(&rx_sa->lock);
813
814 /* 10.6.5 */
815 if (hdr->tci_an & MACSEC_TCI_C ||
816 secy->validate_frames == MACSEC_VALIDATE_STRICT) {
817 u64_stats_update_begin(&rxsc_stats->syncp);
818 rxsc_stats->stats.InPktsNotValid++;
819 u64_stats_update_end(&rxsc_stats->syncp);
820 return false;
821 }
822
823 u64_stats_update_begin(&rxsc_stats->syncp);
824 if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
825 rxsc_stats->stats.InPktsInvalid++;
826 this_cpu_inc(rx_sa->stats->InPktsInvalid);
827 } else if (pn < lowest_pn) {
828 rxsc_stats->stats.InPktsDelayed++;
829 } else {
830 rxsc_stats->stats.InPktsUnchecked++;
831 }
832 u64_stats_update_end(&rxsc_stats->syncp);
833 } else {
834 u64_stats_update_begin(&rxsc_stats->syncp);
835 if (pn < lowest_pn) {
836 rxsc_stats->stats.InPktsDelayed++;
837 } else {
838 rxsc_stats->stats.InPktsOK++;
839 this_cpu_inc(rx_sa->stats->InPktsOK);
840 }
841 u64_stats_update_end(&rxsc_stats->syncp);
842
843 if (pn >= rx_sa->next_pn)
844 rx_sa->next_pn = pn + 1;
845 spin_unlock(&rx_sa->lock);
846 }
847
848 return true;
849 }
850
851 static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
852 {
853 skb->pkt_type = PACKET_HOST;
854 skb->protocol = eth_type_trans(skb, dev);
855
856 skb_reset_network_header(skb);
857 if (!skb_transport_header_was_set(skb))
858 skb_reset_transport_header(skb);
859 skb_reset_mac_len(skb);
860 }
861
862 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
863 {
864 memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
865 skb_pull(skb, hdr_len);
866 pskb_trim_unique(skb, skb->len - icv_len);
867 }
868
869 static void count_rx(struct net_device *dev, int len)
870 {
871 struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
872
873 u64_stats_update_begin(&stats->syncp);
874 stats->rx_packets++;
875 stats->rx_bytes += len;
876 u64_stats_update_end(&stats->syncp);
877 }
878
879 static void macsec_decrypt_done(struct crypto_async_request *base, int err)
880 {
881 struct sk_buff *skb = base->data;
882 struct net_device *dev = skb->dev;
883 struct macsec_dev *macsec = macsec_priv(dev);
884 struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
885 struct macsec_rx_sc *rx_sc = rx_sa->sc;
886 int len, ret;
887 u32 pn;
888
889 aead_request_free(macsec_skb_cb(skb)->req);
890
891 if (!err)
892 macsec_skb_cb(skb)->valid = true;
893
894 rcu_read_lock_bh();
895 pn = ntohl(macsec_ethhdr(skb)->packet_number);
896 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
897 rcu_read_unlock_bh();
898 kfree_skb(skb);
899 goto out;
900 }
901
902 macsec_finalize_skb(skb, macsec->secy.icv_len,
903 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
904 macsec_reset_skb(skb, macsec->secy.netdev);
905
906 len = skb->len;
907 ret = gro_cells_receive(&macsec->gro_cells, skb);
908 if (ret == NET_RX_SUCCESS)
909 count_rx(dev, len);
910 else
911 macsec->secy.netdev->stats.rx_dropped++;
912
913 rcu_read_unlock_bh();
914
915 out:
916 macsec_rxsa_put(rx_sa);
917 macsec_rxsc_put(rx_sc);
918 dev_put(dev);
919 }
920
921 static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
922 struct net_device *dev,
923 struct macsec_rx_sa *rx_sa,
924 sci_t sci,
925 struct macsec_secy *secy)
926 {
927 int ret;
928 struct scatterlist *sg;
929 struct sk_buff *trailer;
930 unsigned char *iv;
931 struct aead_request *req;
932 struct macsec_eth_header *hdr;
933 u16 icv_len = secy->icv_len;
934
935 macsec_skb_cb(skb)->valid = false;
936 skb = skb_share_check(skb, GFP_ATOMIC);
937 if (!skb)
938 return ERR_PTR(-ENOMEM);
939
940 ret = skb_cow_data(skb, 0, &trailer);
941 if (unlikely(ret < 0)) {
942 kfree_skb(skb);
943 return ERR_PTR(ret);
944 }
945 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
946 if (!req) {
947 kfree_skb(skb);
948 return ERR_PTR(-ENOMEM);
949 }
950
951 hdr = (struct macsec_eth_header *)skb->data;
952 macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
953
954 sg_init_table(sg, ret);
955 skb_to_sgvec(skb, sg, 0, skb->len);
956
957 if (hdr->tci_an & MACSEC_TCI_E) {
958 /* confidentiality: ethernet + macsec header
959 * authenticated, encrypted payload
960 */
961 int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
962
963 aead_request_set_crypt(req, sg, sg, len, iv);
964 aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
965 skb = skb_unshare(skb, GFP_ATOMIC);
966 if (!skb) {
967 aead_request_free(req);
968 return ERR_PTR(-ENOMEM);
969 }
970 } else {
971 /* integrity only: all headers + data authenticated */
972 aead_request_set_crypt(req, sg, sg, icv_len, iv);
973 aead_request_set_ad(req, skb->len - icv_len);
974 }
975
976 macsec_skb_cb(skb)->req = req;
977 skb->dev = dev;
978 aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
979
980 dev_hold(dev);
981 ret = crypto_aead_decrypt(req);
982 if (ret == -EINPROGRESS) {
983 return ERR_PTR(ret);
984 } else if (ret != 0) {
985 /* decryption/authentication failed
986 * 10.6 if validateFrames is disabled, deliver anyway
987 */
988 if (ret != -EBADMSG) {
989 kfree_skb(skb);
990 skb = ERR_PTR(ret);
991 }
992 } else {
993 macsec_skb_cb(skb)->valid = true;
994 }
995 dev_put(dev);
996
997 aead_request_free(req);
998
999 return skb;
1000 }
1001
1002 static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
1003 {
1004 struct macsec_rx_sc *rx_sc;
1005
1006 for_each_rxsc(secy, rx_sc) {
1007 if (rx_sc->sci == sci)
1008 return rx_sc;
1009 }
1010
1011 return NULL;
1012 }
1013
1014 static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
1015 {
1016 struct macsec_rx_sc *rx_sc;
1017
1018 for_each_rxsc_rtnl(secy, rx_sc) {
1019 if (rx_sc->sci == sci)
1020 return rx_sc;
1021 }
1022
1023 return NULL;
1024 }
1025
1026 static void handle_not_macsec(struct sk_buff *skb)
1027 {
1028 struct macsec_rxh_data *rxd;
1029 struct macsec_dev *macsec;
1030
1031 rcu_read_lock();
1032 rxd = macsec_data_rcu(skb->dev);
1033
1034 /* 10.6 If the management control validateFrames is not
1035 * Strict, frames without a SecTAG are received, counted, and
1036 * delivered to the Controlled Port
1037 */
1038 list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1039 struct sk_buff *nskb;
1040 int ret;
1041 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1042
1043 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1044 u64_stats_update_begin(&secy_stats->syncp);
1045 secy_stats->stats.InPktsNoTag++;
1046 u64_stats_update_end(&secy_stats->syncp);
1047 continue;
1048 }
1049
1050 /* deliver on this port */
1051 nskb = skb_clone(skb, GFP_ATOMIC);
1052 if (!nskb)
1053 break;
1054
1055 nskb->dev = macsec->secy.netdev;
1056
1057 ret = netif_rx(nskb);
1058 if (ret == NET_RX_SUCCESS) {
1059 u64_stats_update_begin(&secy_stats->syncp);
1060 secy_stats->stats.InPktsUntagged++;
1061 u64_stats_update_end(&secy_stats->syncp);
1062 } else {
1063 macsec->secy.netdev->stats.rx_dropped++;
1064 }
1065 }
1066
1067 rcu_read_unlock();
1068 }
1069
1070 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1071 {
1072 struct sk_buff *skb = *pskb;
1073 struct net_device *dev = skb->dev;
1074 struct macsec_eth_header *hdr;
1075 struct macsec_secy *secy = NULL;
1076 struct macsec_rx_sc *rx_sc;
1077 struct macsec_rx_sa *rx_sa;
1078 struct macsec_rxh_data *rxd;
1079 struct macsec_dev *macsec;
1080 sci_t sci;
1081 u32 pn;
1082 bool cbit;
1083 struct pcpu_rx_sc_stats *rxsc_stats;
1084 struct pcpu_secy_stats *secy_stats;
1085 bool pulled_sci;
1086 int ret;
1087
1088 if (skb_headroom(skb) < ETH_HLEN)
1089 goto drop_direct;
1090
1091 hdr = macsec_ethhdr(skb);
1092 if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
1093 handle_not_macsec(skb);
1094
1095 /* and deliver to the uncontrolled port */
1096 return RX_HANDLER_PASS;
1097 }
1098
1099 skb = skb_unshare(skb, GFP_ATOMIC);
1100 if (!skb) {
1101 *pskb = NULL;
1102 return RX_HANDLER_CONSUMED;
1103 }
1104
1105 pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
1106 if (!pulled_sci) {
1107 if (!pskb_may_pull(skb, macsec_extra_len(false)))
1108 goto drop_direct;
1109 }
1110
1111 hdr = macsec_ethhdr(skb);
1112
1113 /* Frames with a SecTAG that has the TCI E bit set but the C
1114 * bit clear are discarded, as this reserved encoding is used
1115 * to identify frames with a SecTAG that are not to be
1116 * delivered to the Controlled Port.
1117 */
1118 if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
1119 return RX_HANDLER_PASS;
1120
1121         /* if the SCI bit is set, the full SecTAG (including the SCI) must
1122          * have been pulled above
1123          */
1122 if (hdr->tci_an & MACSEC_TCI_SC) {
1123 if (!pulled_sci)
1124 goto drop_direct;
1125 }
1126
1127 /* ethernet header is part of crypto processing */
1128 skb_push(skb, ETH_HLEN);
1129
1130 macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
1131 macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
1132 sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
1133
1134 rcu_read_lock();
1135 rxd = macsec_data_rcu(skb->dev);
1136
1137 list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1138 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
1139 sc = sc ? macsec_rxsc_get(sc) : NULL;
1140
1141 if (sc) {
1142 secy = &macsec->secy;
1143 rx_sc = sc;
1144 break;
1145 }
1146 }
1147
1148 if (!secy)
1149 goto nosci;
1150
1151 dev = secy->netdev;
1152 macsec = macsec_priv(dev);
1153 secy_stats = this_cpu_ptr(macsec->stats);
1154 rxsc_stats = this_cpu_ptr(rx_sc->stats);
1155
1156 if (!macsec_validate_skb(skb, secy->icv_len)) {
1157 u64_stats_update_begin(&secy_stats->syncp);
1158 secy_stats->stats.InPktsBadTag++;
1159 u64_stats_update_end(&secy_stats->syncp);
1160 goto drop_nosa;
1161 }
1162
1163 rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
1164 if (!rx_sa) {
1165 /* 10.6.1 if the SA is not in use */
1166
1167 /* If validateFrames is Strict or the C bit in the
1168 * SecTAG is set, discard
1169 */
1170 if (hdr->tci_an & MACSEC_TCI_C ||
1171 secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1172 u64_stats_update_begin(&rxsc_stats->syncp);
1173 rxsc_stats->stats.InPktsNotUsingSA++;
1174 u64_stats_update_end(&rxsc_stats->syncp);
1175 goto drop_nosa;
1176 }
1177
1178 /* not Strict, the frame (with the SecTAG and ICV
1179 * removed) is delivered to the Controlled Port.
1180 */
1181 u64_stats_update_begin(&rxsc_stats->syncp);
1182 rxsc_stats->stats.InPktsUnusedSA++;
1183 u64_stats_update_end(&rxsc_stats->syncp);
1184 goto deliver;
1185 }
1186
1187 /* First, PN check to avoid decrypting obviously wrong packets */
1188 pn = ntohl(hdr->packet_number);
1189 if (secy->replay_protect) {
1190 bool late;
1191
1192 spin_lock(&rx_sa->lock);
1193 late = rx_sa->next_pn >= secy->replay_window &&
1194 pn < (rx_sa->next_pn - secy->replay_window);
1195 spin_unlock(&rx_sa->lock);
1196
1197 if (late) {
1198 u64_stats_update_begin(&rxsc_stats->syncp);
1199 rxsc_stats->stats.InPktsLate++;
1200 u64_stats_update_end(&rxsc_stats->syncp);
1201 goto drop;
1202 }
1203 }
1204
1205 macsec_skb_cb(skb)->rx_sa = rx_sa;
1206
1207 /* Disabled && !changed text => skip validation */
1208 if (hdr->tci_an & MACSEC_TCI_C ||
1209 secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1210 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1211
1212 if (IS_ERR(skb)) {
1213 /* the decrypt callback needs the reference */
1214 if (PTR_ERR(skb) != -EINPROGRESS) {
1215 macsec_rxsa_put(rx_sa);
1216 macsec_rxsc_put(rx_sc);
1217 }
1218 rcu_read_unlock();
1219 *pskb = NULL;
1220 return RX_HANDLER_CONSUMED;
1221 }
1222
1223 if (!macsec_post_decrypt(skb, secy, pn))
1224 goto drop;
1225
1226 deliver:
1227 macsec_finalize_skb(skb, secy->icv_len,
1228 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1229 macsec_reset_skb(skb, secy->netdev);
1230
1231 if (rx_sa)
1232 macsec_rxsa_put(rx_sa);
1233 macsec_rxsc_put(rx_sc);
1234
1235 ret = gro_cells_receive(&macsec->gro_cells, skb);
1236 if (ret == NET_RX_SUCCESS)
1237 count_rx(dev, skb->len);
1238 else
1239 macsec->secy.netdev->stats.rx_dropped++;
1240
1241 rcu_read_unlock();
1242
1243 *pskb = NULL;
1244 return RX_HANDLER_CONSUMED;
1245
1246 drop:
1247 macsec_rxsa_put(rx_sa);
1248 drop_nosa:
1249 macsec_rxsc_put(rx_sc);
1250 rcu_read_unlock();
1251 drop_direct:
1252 kfree_skb(skb);
1253 *pskb = NULL;
1254 return RX_HANDLER_CONSUMED;
1255
1256 nosci:
1257 /* 10.6.1 if the SC is not found */
1258 cbit = !!(hdr->tci_an & MACSEC_TCI_C);
1259 if (!cbit)
1260 macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
1261 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1262
1263 list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1264 struct sk_buff *nskb;
1265
1266 secy_stats = this_cpu_ptr(macsec->stats);
1267
1268 /* If validateFrames is Strict or the C bit in the
1269 * SecTAG is set, discard
1270 */
1271 if (cbit ||
1272 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1273 u64_stats_update_begin(&secy_stats->syncp);
1274 secy_stats->stats.InPktsNoSCI++;
1275 u64_stats_update_end(&secy_stats->syncp);
1276 continue;
1277 }
1278
1279                 /* not Strict, the frame (with the SecTAG and ICV
1280 * removed) is delivered to the Controlled Port.
1281 */
1282 nskb = skb_clone(skb, GFP_ATOMIC);
1283 if (!nskb)
1284 break;
1285
1286 macsec_reset_skb(nskb, macsec->secy.netdev);
1287
1288 ret = netif_rx(nskb);
1289 if (ret == NET_RX_SUCCESS) {
1290 u64_stats_update_begin(&secy_stats->syncp);
1291 secy_stats->stats.InPktsUnknownSCI++;
1292 u64_stats_update_end(&secy_stats->syncp);
1293 } else {
1294 macsec->secy.netdev->stats.rx_dropped++;
1295 }
1296 }
1297
1298 rcu_read_unlock();
1299 *pskb = skb;
1300 return RX_HANDLER_PASS;
1301 }
1302
1303 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1304 {
1305 struct crypto_aead *tfm;
1306 int ret;
1307
1308 tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
1309
1310 if (IS_ERR(tfm))
1311 return tfm;
1312
1313 ret = crypto_aead_setkey(tfm, key, key_len);
1314 if (ret < 0)
1315 goto fail;
1316
1317 ret = crypto_aead_setauthsize(tfm, icv_len);
1318 if (ret < 0)
1319 goto fail;
1320
1321 return tfm;
1322 fail:
1323 crypto_free_aead(tfm);
1324 return ERR_PTR(ret);
1325 }
1326
1327 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
1328 int icv_len)
1329 {
1330 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
1331 if (!rx_sa->stats)
1332 return -ENOMEM;
1333
1334 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1335 if (IS_ERR(rx_sa->key.tfm)) {
1336 free_percpu(rx_sa->stats);
1337 return PTR_ERR(rx_sa->key.tfm);
1338 }
1339
1340 rx_sa->active = false;
1341 rx_sa->next_pn = 1;
1342 atomic_set(&rx_sa->refcnt, 1);
1343 spin_lock_init(&rx_sa->lock);
1344
1345 return 0;
1346 }
1347
1348 static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
1349 {
1350 rx_sa->active = false;
1351
1352 macsec_rxsa_put(rx_sa);
1353 }
1354
1355 static void free_rx_sc(struct macsec_rx_sc *rx_sc)
1356 {
1357 int i;
1358
1359 for (i = 0; i < MACSEC_NUM_AN; i++) {
1360 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
1361
1362 RCU_INIT_POINTER(rx_sc->sa[i], NULL);
1363 if (sa)
1364 clear_rx_sa(sa);
1365 }
1366
1367 macsec_rxsc_put(rx_sc);
1368 }
1369
1370 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
1371 {
1372 struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
1373
1374 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
1375 rx_sc;
1376 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
1377 if (rx_sc->sci == sci) {
1378 if (rx_sc->active)
1379 secy->n_rx_sc--;
1380 rcu_assign_pointer(*rx_scp, rx_sc->next);
1381 return rx_sc;
1382 }
1383 }
1384
1385 return NULL;
1386 }
1387
1388 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
1389 {
1390 struct macsec_rx_sc *rx_sc;
1391 struct macsec_dev *macsec;
1392 struct net_device *real_dev = macsec_priv(dev)->real_dev;
1393 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
1394 struct macsec_secy *secy;
1395
1396 list_for_each_entry(macsec, &rxd->secys, secys) {
1397 if (find_rx_sc_rtnl(&macsec->secy, sci))
1398 return ERR_PTR(-EEXIST);
1399 }
1400
1401 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
1402 if (!rx_sc)
1403 return ERR_PTR(-ENOMEM);
1404
1405 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
1406 if (!rx_sc->stats) {
1407 kfree(rx_sc);
1408 return ERR_PTR(-ENOMEM);
1409 }
1410
1411 rx_sc->sci = sci;
1412 rx_sc->active = true;
1413 atomic_set(&rx_sc->refcnt, 1);
1414
1415 secy = &macsec_priv(dev)->secy;
1416 rcu_assign_pointer(rx_sc->next, secy->rx_sc);
1417 rcu_assign_pointer(secy->rx_sc, rx_sc);
1418
1419 if (rx_sc->active)
1420 secy->n_rx_sc++;
1421
1422 return rx_sc;
1423 }
1424
1425 static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
1426 int icv_len)
1427 {
1428 tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
1429 if (!tx_sa->stats)
1430 return -ENOMEM;
1431
1432 tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
1433 if (IS_ERR(tx_sa->key.tfm)) {
1434 free_percpu(tx_sa->stats);
1435 return PTR_ERR(tx_sa->key.tfm);
1436 }
1437
1438 tx_sa->active = false;
1439 atomic_set(&tx_sa->refcnt, 1);
1440 spin_lock_init(&tx_sa->lock);
1441
1442 return 0;
1443 }
1444
1445 static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
1446 {
1447 tx_sa->active = false;
1448
1449 macsec_txsa_put(tx_sa);
1450 }
1451
1452 static struct genl_family macsec_fam;
1453
1454 static struct net_device *get_dev_from_nl(struct net *net,
1455 struct nlattr **attrs)
1456 {
1457 int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
1458 struct net_device *dev;
1459
1460 dev = __dev_get_by_index(net, ifindex);
1461 if (!dev)
1462 return ERR_PTR(-ENODEV);
1463
1464 if (!netif_is_macsec(dev))
1465 return ERR_PTR(-ENODEV);
1466
1467 return dev;
1468 }
1469
1470 static sci_t nla_get_sci(const struct nlattr *nla)
1471 {
1472 return (__force sci_t)nla_get_u64(nla);
1473 }
1474
1475 static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
1476 int padattr)
1477 {
1478 return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
1479 }
1480
1481 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
1482 struct nlattr **attrs,
1483 struct nlattr **tb_sa,
1484 struct net_device **devp,
1485 struct macsec_secy **secyp,
1486 struct macsec_tx_sc **scp,
1487 u8 *assoc_num)
1488 {
1489 struct net_device *dev;
1490 struct macsec_secy *secy;
1491 struct macsec_tx_sc *tx_sc;
1492 struct macsec_tx_sa *tx_sa;
1493
1494 if (!tb_sa[MACSEC_SA_ATTR_AN])
1495 return ERR_PTR(-EINVAL);
1496
1497 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1498
1499 dev = get_dev_from_nl(net, attrs);
1500 if (IS_ERR(dev))
1501 return ERR_CAST(dev);
1502
1503 if (*assoc_num >= MACSEC_NUM_AN)
1504 return ERR_PTR(-EINVAL);
1505
1506 secy = &macsec_priv(dev)->secy;
1507 tx_sc = &secy->tx_sc;
1508
1509 tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
1510 if (!tx_sa)
1511 return ERR_PTR(-ENODEV);
1512
1513 *devp = dev;
1514 *scp = tx_sc;
1515 *secyp = secy;
1516 return tx_sa;
1517 }
1518
1519 static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
1520 struct nlattr **attrs,
1521 struct nlattr **tb_rxsc,
1522 struct net_device **devp,
1523 struct macsec_secy **secyp)
1524 {
1525 struct net_device *dev;
1526 struct macsec_secy *secy;
1527 struct macsec_rx_sc *rx_sc;
1528 sci_t sci;
1529
1530 dev = get_dev_from_nl(net, attrs);
1531 if (IS_ERR(dev))
1532 return ERR_CAST(dev);
1533
1534 secy = &macsec_priv(dev)->secy;
1535
1536 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1537 return ERR_PTR(-EINVAL);
1538
1539 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1540 rx_sc = find_rx_sc_rtnl(secy, sci);
1541 if (!rx_sc)
1542 return ERR_PTR(-ENODEV);
1543
1544 *secyp = secy;
1545 *devp = dev;
1546
1547 return rx_sc;
1548 }
1549
1550 static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
1551 struct nlattr **attrs,
1552 struct nlattr **tb_rxsc,
1553 struct nlattr **tb_sa,
1554 struct net_device **devp,
1555 struct macsec_secy **secyp,
1556 struct macsec_rx_sc **scp,
1557 u8 *assoc_num)
1558 {
1559 struct macsec_rx_sc *rx_sc;
1560 struct macsec_rx_sa *rx_sa;
1561
1562 if (!tb_sa[MACSEC_SA_ATTR_AN])
1563 return ERR_PTR(-EINVAL);
1564
1565 *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1566 if (*assoc_num >= MACSEC_NUM_AN)
1567 return ERR_PTR(-EINVAL);
1568
1569 rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
1570 if (IS_ERR(rx_sc))
1571 return ERR_CAST(rx_sc);
1572
1573 rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
1574 if (!rx_sa)
1575 return ERR_PTR(-ENODEV);
1576
1577 *scp = rx_sc;
1578 return rx_sa;
1579 }
1580
1581
1582 static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
1583 [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
1584 [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
1585 [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
1586 };
1587
1588 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
1589 [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
1590 [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
1591 };
1592
1593 static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1594 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1595 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1596 [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
1597 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1598 .len = MACSEC_KEYID_LEN, },
1599 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1600 .len = MACSEC_MAX_KEY_LEN, },
1601 };
1602
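/*
 * Expected attribute nesting for the commands below, as a sketch (see
 * parse_sa_config() and parse_rxsc_config()):
 *
 *	MACSEC_ATTR_IFINDEX			(u32)
 *	MACSEC_ATTR_RXSC_CONFIG			(nested)
 *		MACSEC_RXSC_ATTR_SCI		(u64)
 *		MACSEC_RXSC_ATTR_ACTIVE		(u8)
 *	MACSEC_ATTR_SA_CONFIG			(nested)
 *		MACSEC_SA_ATTR_AN/_PN/_ACTIVE	(u8/u32/u8)
 *		MACSEC_SA_ATTR_KEYID, _KEY	(binary)
 */
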
1603 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
1604 {
1605 if (!attrs[MACSEC_ATTR_SA_CONFIG])
1606 return -EINVAL;
1607
1608 if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
1609 attrs[MACSEC_ATTR_SA_CONFIG],
1610 macsec_genl_sa_policy, NULL))
1611 return -EINVAL;
1612
1613 return 0;
1614 }
1615
1616 static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
1617 {
1618 if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
1619 return -EINVAL;
1620
1621 if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
1622 attrs[MACSEC_ATTR_RXSC_CONFIG],
1623 macsec_genl_rxsc_policy, NULL))
1624 return -EINVAL;
1625
1626 return 0;
1627 }
1628
1629 static bool validate_add_rxsa(struct nlattr **attrs)
1630 {
1631 if (!attrs[MACSEC_SA_ATTR_AN] ||
1632 !attrs[MACSEC_SA_ATTR_KEY] ||
1633 !attrs[MACSEC_SA_ATTR_KEYID])
1634 return false;
1635
1636 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1637 return false;
1638
1639 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
1640 return false;
1641
1642 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1643 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1644 return false;
1645 }
1646
1647 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1648 return false;
1649
1650 return true;
1651 }
1652
1653 static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1654 {
1655 struct net_device *dev;
1656 struct nlattr **attrs = info->attrs;
1657 struct macsec_secy *secy;
1658 struct macsec_rx_sc *rx_sc;
1659 struct macsec_rx_sa *rx_sa;
1660 unsigned char assoc_num;
1661 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1662 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1663 int err;
1664
1665 if (!attrs[MACSEC_ATTR_IFINDEX])
1666 return -EINVAL;
1667
1668 if (parse_sa_config(attrs, tb_sa))
1669 return -EINVAL;
1670
1671 if (parse_rxsc_config(attrs, tb_rxsc))
1672 return -EINVAL;
1673
1674 if (!validate_add_rxsa(tb_sa))
1675 return -EINVAL;
1676
1677 rtnl_lock();
1678 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
1679 if (IS_ERR(rx_sc)) {
1680 rtnl_unlock();
1681 return PTR_ERR(rx_sc);
1682 }
1683
1684 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1685
1686 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1687 pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
1688 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1689 rtnl_unlock();
1690 return -EINVAL;
1691 }
1692
1693 rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
1694 if (rx_sa) {
1695 rtnl_unlock();
1696 return -EBUSY;
1697 }
1698
1699 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1700 if (!rx_sa) {
1701 rtnl_unlock();
1702 return -ENOMEM;
1703 }
1704
1705 err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1706 secy->key_len, secy->icv_len);
1707 if (err < 0) {
1708 kfree(rx_sa);
1709 rtnl_unlock();
1710 return err;
1711 }
1712
1713 if (tb_sa[MACSEC_SA_ATTR_PN]) {
1714 spin_lock_bh(&rx_sa->lock);
1715 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
1716 spin_unlock_bh(&rx_sa->lock);
1717 }
1718
1719 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1720 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1721
1722 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1723 rx_sa->sc = rx_sc;
1724 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1725
1726 rtnl_unlock();
1727
1728 return 0;
1729 }
1730
1731 static bool validate_add_rxsc(struct nlattr **attrs)
1732 {
1733 if (!attrs[MACSEC_RXSC_ATTR_SCI])
1734 return false;
1735
1736 if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
1737 if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
1738 return false;
1739 }
1740
1741 return true;
1742 }
1743
1744 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1745 {
1746 struct net_device *dev;
1747 sci_t sci = MACSEC_UNDEF_SCI;
1748 struct nlattr **attrs = info->attrs;
1749 struct macsec_rx_sc *rx_sc;
1750 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1751
1752 if (!attrs[MACSEC_ATTR_IFINDEX])
1753 return -EINVAL;
1754
1755 if (parse_rxsc_config(attrs, tb_rxsc))
1756 return -EINVAL;
1757
1758 if (!validate_add_rxsc(tb_rxsc))
1759 return -EINVAL;
1760
1761 rtnl_lock();
1762 dev = get_dev_from_nl(genl_info_net(info), attrs);
1763 if (IS_ERR(dev)) {
1764 rtnl_unlock();
1765 return PTR_ERR(dev);
1766 }
1767
1768 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1769
1770 rx_sc = create_rx_sc(dev, sci);
1771 if (IS_ERR(rx_sc)) {
1772 rtnl_unlock();
1773 return PTR_ERR(rx_sc);
1774 }
1775
1776 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1777 rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1778
1779 rtnl_unlock();
1780
1781 return 0;
1782 }
1783
1784 static bool validate_add_txsa(struct nlattr **attrs)
1785 {
1786 if (!attrs[MACSEC_SA_ATTR_AN] ||
1787 !attrs[MACSEC_SA_ATTR_PN] ||
1788 !attrs[MACSEC_SA_ATTR_KEY] ||
1789 !attrs[MACSEC_SA_ATTR_KEYID])
1790 return false;
1791
1792 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1793 return false;
1794
1795 if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
1796 return false;
1797
1798 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1799 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1800 return false;
1801 }
1802
1803 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1804 return false;
1805
1806 return true;
1807 }
1808
1809 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1810 {
1811 struct net_device *dev;
1812 struct nlattr **attrs = info->attrs;
1813 struct macsec_secy *secy;
1814 struct macsec_tx_sc *tx_sc;
1815 struct macsec_tx_sa *tx_sa;
1816 unsigned char assoc_num;
1817 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1818 int err;
1819
1820 if (!attrs[MACSEC_ATTR_IFINDEX])
1821 return -EINVAL;
1822
1823 if (parse_sa_config(attrs, tb_sa))
1824 return -EINVAL;
1825
1826 if (!validate_add_txsa(tb_sa))
1827 return -EINVAL;
1828
1829 rtnl_lock();
1830 dev = get_dev_from_nl(genl_info_net(info), attrs);
1831 if (IS_ERR(dev)) {
1832 rtnl_unlock();
1833 return PTR_ERR(dev);
1834 }
1835
1836 secy = &macsec_priv(dev)->secy;
1837 tx_sc = &secy->tx_sc;
1838
1839 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1840
1841 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1842 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1843 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1844 rtnl_unlock();
1845 return -EINVAL;
1846 }
1847
1848 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
1849 if (tx_sa) {
1850 rtnl_unlock();
1851 return -EBUSY;
1852 }
1853
1854 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
1855 if (!tx_sa) {
1856 rtnl_unlock();
1857 return -ENOMEM;
1858 }
1859
1860 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1861 secy->key_len, secy->icv_len);
1862 if (err < 0) {
1863 kfree(tx_sa);
1864 rtnl_unlock();
1865 return err;
1866 }
1867
1868 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1869
1870 spin_lock_bh(&tx_sa->lock);
1871 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
1872 spin_unlock_bh(&tx_sa->lock);
1873
1874 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1875 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1876
1877 if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
1878 secy->operational = true;
1879
1880 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
1881
1882 rtnl_unlock();
1883
1884 return 0;
1885 }
1886
1887 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
1888 {
1889 struct nlattr **attrs = info->attrs;
1890 struct net_device *dev;
1891 struct macsec_secy *secy;
1892 struct macsec_rx_sc *rx_sc;
1893 struct macsec_rx_sa *rx_sa;
1894 u8 assoc_num;
1895 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1896 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1897
1898 if (!attrs[MACSEC_ATTR_IFINDEX])
1899 return -EINVAL;
1900
1901 if (parse_sa_config(attrs, tb_sa))
1902 return -EINVAL;
1903
1904 if (parse_rxsc_config(attrs, tb_rxsc))
1905 return -EINVAL;
1906
1907 rtnl_lock();
1908 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
1909 &dev, &secy, &rx_sc, &assoc_num);
1910 if (IS_ERR(rx_sa)) {
1911 rtnl_unlock();
1912 return PTR_ERR(rx_sa);
1913 }
1914
1915 if (rx_sa->active) {
1916 rtnl_unlock();
1917 return -EBUSY;
1918 }
1919
1920 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
1921 clear_rx_sa(rx_sa);
1922
1923 rtnl_unlock();
1924
1925 return 0;
1926 }
1927
1928 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
1929 {
1930 struct nlattr **attrs = info->attrs;
1931 struct net_device *dev;
1932 struct macsec_secy *secy;
1933 struct macsec_rx_sc *rx_sc;
1934 sci_t sci;
1935 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1936
1937 if (!attrs[MACSEC_ATTR_IFINDEX])
1938 return -EINVAL;
1939
1940 if (parse_rxsc_config(attrs, tb_rxsc))
1941 return -EINVAL;
1942
1943 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1944 return -EINVAL;
1945
1946 rtnl_lock();
1947 dev = get_dev_from_nl(genl_info_net(info), info->attrs);
1948 if (IS_ERR(dev)) {
1949 rtnl_unlock();
1950 return PTR_ERR(dev);
1951 }
1952
1953 secy = &macsec_priv(dev)->secy;
1954 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1955
1956 rx_sc = del_rx_sc(secy, sci);
1957 if (!rx_sc) {
1958 rtnl_unlock();
1959 return -ENODEV;
1960 }
1961
1962 free_rx_sc(rx_sc);
1963 rtnl_unlock();
1964
1965 return 0;
1966 }
1967
1968 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
1969 {
1970 struct nlattr **attrs = info->attrs;
1971 struct net_device *dev;
1972 struct macsec_secy *secy;
1973 struct macsec_tx_sc *tx_sc;
1974 struct macsec_tx_sa *tx_sa;
1975 u8 assoc_num;
1976 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1977
1978 if (!attrs[MACSEC_ATTR_IFINDEX])
1979 return -EINVAL;
1980
1981 if (parse_sa_config(attrs, tb_sa))
1982 return -EINVAL;
1983
1984 rtnl_lock();
1985 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
1986 &dev, &secy, &tx_sc, &assoc_num);
1987 if (IS_ERR(tx_sa)) {
1988 rtnl_unlock();
1989 return PTR_ERR(tx_sa);
1990 }
1991
1992 if (tx_sa->active) {
1993 rtnl_unlock();
1994 return -EBUSY;
1995 }
1996
1997 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
1998 clear_tx_sa(tx_sa);
1999
2000 rtnl_unlock();
2001
2002 return 0;
2003 }
2004
2005 static bool validate_upd_sa(struct nlattr **attrs)
2006 {
2007 if (!attrs[MACSEC_SA_ATTR_AN] ||
2008 attrs[MACSEC_SA_ATTR_KEY] ||
2009 attrs[MACSEC_SA_ATTR_KEYID])
2010 return false;
2011
2012 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2013 return false;
2014
2015 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
2016 return false;
2017
2018 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2019 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2020 return false;
2021 }
2022
2023 return true;
2024 }
2025
2026 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2027 {
2028 struct nlattr **attrs = info->attrs;
2029 struct net_device *dev;
2030 struct macsec_secy *secy;
2031 struct macsec_tx_sc *tx_sc;
2032 struct macsec_tx_sa *tx_sa;
2033 u8 assoc_num;
2034 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2035
2036 if (!attrs[MACSEC_ATTR_IFINDEX])
2037 return -EINVAL;
2038
2039 if (parse_sa_config(attrs, tb_sa))
2040 return -EINVAL;
2041
2042 if (!validate_upd_sa(tb_sa))
2043 return -EINVAL;
2044
2045 rtnl_lock();
2046 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2047 &dev, &secy, &tx_sc, &assoc_num);
2048 if (IS_ERR(tx_sa)) {
2049 rtnl_unlock();
2050 return PTR_ERR(tx_sa);
2051 }
2052
2053 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2054 spin_lock_bh(&tx_sa->lock);
2055 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2056 spin_unlock_bh(&tx_sa->lock);
2057 }
2058
2059 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2060 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2061
2062 if (assoc_num == tx_sc->encoding_sa)
2063 secy->operational = tx_sa->active;
2064
2065 rtnl_unlock();
2066
2067 return 0;
2068 }
2069
2070 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2071 {
2072 struct nlattr **attrs = info->attrs;
2073 struct net_device *dev;
2074 struct macsec_secy *secy;
2075 struct macsec_rx_sc *rx_sc;
2076 struct macsec_rx_sa *rx_sa;
2077 u8 assoc_num;
2078 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2079 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2080
2081 if (!attrs[MACSEC_ATTR_IFINDEX])
2082 return -EINVAL;
2083
2084 if (parse_rxsc_config(attrs, tb_rxsc))
2085 return -EINVAL;
2086
2087 if (parse_sa_config(attrs, tb_sa))
2088 return -EINVAL;
2089
2090 if (!validate_upd_sa(tb_sa))
2091 return -EINVAL;
2092
2093 rtnl_lock();
2094 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2095 &dev, &secy, &rx_sc, &assoc_num);
2096 if (IS_ERR(rx_sa)) {
2097 rtnl_unlock();
2098 return PTR_ERR(rx_sa);
2099 }
2100
2101 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2102 spin_lock_bh(&rx_sa->lock);
2103 rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2104 spin_unlock_bh(&rx_sa->lock);
2105 }
2106
2107 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2108 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2109
2110 rtnl_unlock();
2111 return 0;
2112 }
2113
2114 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2115 {
2116 struct nlattr **attrs = info->attrs;
2117 struct net_device *dev;
2118 struct macsec_secy *secy;
2119 struct macsec_rx_sc *rx_sc;
2120 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2121
2122 if (!attrs[MACSEC_ATTR_IFINDEX])
2123 return -EINVAL;
2124
2125 if (parse_rxsc_config(attrs, tb_rxsc))
2126 return -EINVAL;
2127
2128 if (!validate_add_rxsc(tb_rxsc))
2129 return -EINVAL;
2130
2131 rtnl_lock();
2132 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2133 if (IS_ERR(rx_sc)) {
2134 rtnl_unlock();
2135 return PTR_ERR(rx_sc);
2136 }
2137
2138 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2139 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2140
2141 if (rx_sc->active != new)
2142 secy->n_rx_sc += new ? 1 : -1;
2143
2144 rx_sc->active = new;
2145 }
2146
2147 rtnl_unlock();
2148
2149 return 0;
2150 }
2151
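/* The copy_*_stats() helpers below fold the per-CPU counters into one
 * sum and emit it as netlink attributes. The per-SA counters are plain
 * u32s; the per-SC and per-SecY counters are 64 bits wide and are read
 * under a u64_stats retry loop so that 32-bit machines see a
 * consistent snapshot.
 */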
2152 static int copy_tx_sa_stats(struct sk_buff *skb,
2153 struct macsec_tx_sa_stats __percpu *pstats)
2154 {
2155 struct macsec_tx_sa_stats sum = {0, };
2156 int cpu;
2157
2158 for_each_possible_cpu(cpu) {
2159 const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
2160
2161 sum.OutPktsProtected += stats->OutPktsProtected;
2162 sum.OutPktsEncrypted += stats->OutPktsEncrypted;
2163 }
2164
2165 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
2166 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
2167 return -EMSGSIZE;
2168
2169 return 0;
2170 }
2171
2172 static int copy_rx_sa_stats(struct sk_buff *skb,
2173 struct macsec_rx_sa_stats __percpu *pstats)
2174 {
2175 struct macsec_rx_sa_stats sum = {0, };
2176 int cpu;
2177
2178 for_each_possible_cpu(cpu) {
2179 const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
2180
2181 sum.InPktsOK += stats->InPktsOK;
2182 sum.InPktsInvalid += stats->InPktsInvalid;
2183 sum.InPktsNotValid += stats->InPktsNotValid;
2184 sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
2185 sum.InPktsUnusedSA += stats->InPktsUnusedSA;
2186 }
2187
2188 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
2189 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
2190 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
2191 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
2192 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
2193 return -EMSGSIZE;
2194
2195 return 0;
2196 }
2197
2198 static int copy_rx_sc_stats(struct sk_buff *skb,
2199 struct pcpu_rx_sc_stats __percpu *pstats)
2200 {
2201 struct macsec_rx_sc_stats sum = {0, };
2202 int cpu;
2203
2204 for_each_possible_cpu(cpu) {
2205 const struct pcpu_rx_sc_stats *stats;
2206 struct macsec_rx_sc_stats tmp;
2207 unsigned int start;
2208
2209 stats = per_cpu_ptr(pstats, cpu);
2210 do {
2211 start = u64_stats_fetch_begin_irq(&stats->syncp);
2212 memcpy(&tmp, &stats->stats, sizeof(tmp));
2213 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2214
2215 sum.InOctetsValidated += tmp.InOctetsValidated;
2216 sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
2217 sum.InPktsUnchecked += tmp.InPktsUnchecked;
2218 sum.InPktsDelayed += tmp.InPktsDelayed;
2219 sum.InPktsOK += tmp.InPktsOK;
2220 sum.InPktsInvalid += tmp.InPktsInvalid;
2221 sum.InPktsLate += tmp.InPktsLate;
2222 sum.InPktsNotValid += tmp.InPktsNotValid;
2223 sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
2224 sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
2225 }
2226
2227 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2228 sum.InOctetsValidated,
2229 MACSEC_RXSC_STATS_ATTR_PAD) ||
2230 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2231 sum.InOctetsDecrypted,
2232 MACSEC_RXSC_STATS_ATTR_PAD) ||
2233 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2234 sum.InPktsUnchecked,
2235 MACSEC_RXSC_STATS_ATTR_PAD) ||
2236 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2237 sum.InPktsDelayed,
2238 MACSEC_RXSC_STATS_ATTR_PAD) ||
2239 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2240 sum.InPktsOK,
2241 MACSEC_RXSC_STATS_ATTR_PAD) ||
2242 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2243 sum.InPktsInvalid,
2244 MACSEC_RXSC_STATS_ATTR_PAD) ||
2245 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2246 sum.InPktsLate,
2247 MACSEC_RXSC_STATS_ATTR_PAD) ||
2248 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2249 sum.InPktsNotValid,
2250 MACSEC_RXSC_STATS_ATTR_PAD) ||
2251 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2252 sum.InPktsNotUsingSA,
2253 MACSEC_RXSC_STATS_ATTR_PAD) ||
2254 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2255 sum.InPktsUnusedSA,
2256 MACSEC_RXSC_STATS_ATTR_PAD))
2257 return -EMSGSIZE;
2258
2259 return 0;
2260 }
2261
2262 static int copy_tx_sc_stats(struct sk_buff *skb,
2263 struct pcpu_tx_sc_stats __percpu *pstats)
2264 {
2265 struct macsec_tx_sc_stats sum = {0, };
2266 int cpu;
2267
2268 for_each_possible_cpu(cpu) {
2269 const struct pcpu_tx_sc_stats *stats;
2270 struct macsec_tx_sc_stats tmp;
2271 unsigned int start;
2272
2273 stats = per_cpu_ptr(pstats, cpu);
2274 do {
2275 start = u64_stats_fetch_begin_irq(&stats->syncp);
2276 memcpy(&tmp, &stats->stats, sizeof(tmp));
2277 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2278
2279 sum.OutPktsProtected += tmp.OutPktsProtected;
2280 sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
2281 sum.OutOctetsProtected += tmp.OutOctetsProtected;
2282 sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2283 }
2284
2285 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2286 sum.OutPktsProtected,
2287 MACSEC_TXSC_STATS_ATTR_PAD) ||
2288 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2289 sum.OutPktsEncrypted,
2290 MACSEC_TXSC_STATS_ATTR_PAD) ||
2291 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2292 sum.OutOctetsProtected,
2293 MACSEC_TXSC_STATS_ATTR_PAD) ||
2294 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2295 sum.OutOctetsEncrypted,
2296 MACSEC_TXSC_STATS_ATTR_PAD))
2297 return -EMSGSIZE;
2298
2299 return 0;
2300 }
2301
2302 static int copy_secy_stats(struct sk_buff *skb,
2303 struct pcpu_secy_stats __percpu *pstats)
2304 {
2305 struct macsec_dev_stats sum = {0, };
2306 int cpu;
2307
2308 for_each_possible_cpu(cpu) {
2309 const struct pcpu_secy_stats *stats;
2310 struct macsec_dev_stats tmp;
2311 unsigned int start;
2312
2313 stats = per_cpu_ptr(pstats, cpu);
2314 do {
2315 start = u64_stats_fetch_begin_irq(&stats->syncp);
2316 memcpy(&tmp, &stats->stats, sizeof(tmp));
2317 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2318
2319 sum.OutPktsUntagged += tmp.OutPktsUntagged;
2320 sum.InPktsUntagged += tmp.InPktsUntagged;
2321 sum.OutPktsTooLong += tmp.OutPktsTooLong;
2322 sum.InPktsNoTag += tmp.InPktsNoTag;
2323 sum.InPktsBadTag += tmp.InPktsBadTag;
2324 sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2325 sum.InPktsNoSCI += tmp.InPktsNoSCI;
2326 sum.InPktsOverrun += tmp.InPktsOverrun;
2327 }
2328
2329 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2330 sum.OutPktsUntagged,
2331 MACSEC_SECY_STATS_ATTR_PAD) ||
2332 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2333 sum.InPktsUntagged,
2334 MACSEC_SECY_STATS_ATTR_PAD) ||
2335 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2336 sum.OutPktsTooLong,
2337 MACSEC_SECY_STATS_ATTR_PAD) ||
2338 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2339 sum.InPktsNoTag,
2340 MACSEC_SECY_STATS_ATTR_PAD) ||
2341 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2342 sum.InPktsBadTag,
2343 MACSEC_SECY_STATS_ATTR_PAD) ||
2344 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2345 sum.InPktsUnknownSCI,
2346 MACSEC_SECY_STATS_ATTR_PAD) ||
2347 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2348 sum.InPktsNoSCI,
2349 MACSEC_SECY_STATS_ATTR_PAD) ||
2350 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2351 sum.InPktsOverrun,
2352 MACSEC_SECY_STATS_ATTR_PAD))
2353 return -EMSGSIZE;
2354
2355 return 0;
2356 }
2357
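/* Emit the SecY configuration as a nested MACSEC_ATTR_SECY attribute.
 * Returns nonzero on failure so the caller can cancel the enclosing
 * message.
 */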
2358 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2359 {
2360 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2361 struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
2362
2363 if (!secy_nest)
2364 return 1;
2365
2366 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2367 MACSEC_SECY_ATTR_PAD) ||
2368 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2369 MACSEC_DEFAULT_CIPHER_ID,
2370 MACSEC_SECY_ATTR_PAD) ||
2371 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2372 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2373 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2374 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
2375 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
2376 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
2377 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
2378 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
2379 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
2380 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
2381 goto cancel;
2382
2383 if (secy->replay_protect) {
2384 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
2385 goto cancel;
2386 }
2387
2388 nla_nest_end(skb, secy_nest);
2389 return 0;
2390
2391 cancel:
2392 nla_nest_cancel(skb, secy_nest);
2393 return 1;
2394 }
2395
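/* Dump one SecY as a MACSEC_CMD_GET_TXSC multipart message: ifindex,
 * the nested SecY configuration, TX SC and SecY statistics, the list
 * of TX SAs (each with its own statistics), then the list of RX SCs,
 * each carrying its statistics and its own SA list. On any failure,
 * every partially built nest is cancelled before the whole message is
 * abandoned.
 */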
2396 static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2397 struct sk_buff *skb, struct netlink_callback *cb)
2398 {
2399 struct macsec_rx_sc *rx_sc;
2400 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2401 struct nlattr *txsa_list, *rxsc_list;
2402 int i, j;
2403 void *hdr;
2404 struct nlattr *attr;
2405
2406 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2407 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
2408 if (!hdr)
2409 return -EMSGSIZE;
2410
2411 genl_dump_check_consistent(cb, hdr, &macsec_fam);
2412
2413 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
2414 goto nla_put_failure;
2415
2416 if (nla_put_secy(secy, skb))
2417 goto nla_put_failure;
2418
2419 attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
2420 if (!attr)
2421 goto nla_put_failure;
2422 if (copy_tx_sc_stats(skb, tx_sc->stats)) {
2423 nla_nest_cancel(skb, attr);
2424 goto nla_put_failure;
2425 }
2426 nla_nest_end(skb, attr);
2427
2428 attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
2429 if (!attr)
2430 goto nla_put_failure;
2431 if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
2432 nla_nest_cancel(skb, attr);
2433 goto nla_put_failure;
2434 }
2435 nla_nest_end(skb, attr);
2436
2437 txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
2438 if (!txsa_list)
2439 goto nla_put_failure;
2440 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
2441 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
2442 struct nlattr *txsa_nest;
2443
2444 if (!tx_sa)
2445 continue;
2446
2447 txsa_nest = nla_nest_start(skb, j++);
2448 if (!txsa_nest) {
2449 nla_nest_cancel(skb, txsa_list);
2450 goto nla_put_failure;
2451 }
2452
2453 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2454 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
2455 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
2456 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
2457 nla_nest_cancel(skb, txsa_nest);
2458 nla_nest_cancel(skb, txsa_list);
2459 goto nla_put_failure;
2460 }
2461
2462 attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
2463 if (!attr) {
2464 nla_nest_cancel(skb, txsa_nest);
2465 nla_nest_cancel(skb, txsa_list);
2466 goto nla_put_failure;
2467 }
2468 if (copy_tx_sa_stats(skb, tx_sa->stats)) {
2469 nla_nest_cancel(skb, attr);
2470 nla_nest_cancel(skb, txsa_nest);
2471 nla_nest_cancel(skb, txsa_list);
2472 goto nla_put_failure;
2473 }
2474 nla_nest_end(skb, attr);
2475
2476 nla_nest_end(skb, txsa_nest);
2477 }
2478 nla_nest_end(skb, txsa_list);
2479
2480 rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
2481 if (!rxsc_list)
2482 goto nla_put_failure;
2483
2484 j = 1;
2485 for_each_rxsc_rtnl(secy, rx_sc) {
2486 int k;
2487 struct nlattr *rxsa_list;
2488 struct nlattr *rxsc_nest = nla_nest_start(skb, j++);
2489
2490 if (!rxsc_nest) {
2491 nla_nest_cancel(skb, rxsc_list);
2492 goto nla_put_failure;
2493 }
2494
2495 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
2496 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
2497 MACSEC_RXSC_ATTR_PAD)) {
2498 nla_nest_cancel(skb, rxsc_nest);
2499 nla_nest_cancel(skb, rxsc_list);
2500 goto nla_put_failure;
2501 }
2502
2503 attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
2504 if (!attr) {
2505 nla_nest_cancel(skb, rxsc_nest);
2506 nla_nest_cancel(skb, rxsc_list);
2507 goto nla_put_failure;
2508 }
2509 if (copy_rx_sc_stats(skb, rx_sc->stats)) {
2510 nla_nest_cancel(skb, attr);
2511 nla_nest_cancel(skb, rxsc_nest);
2512 nla_nest_cancel(skb, rxsc_list);
2513 goto nla_put_failure;
2514 }
2515 nla_nest_end(skb, attr);
2516
2517 rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
2518 if (!rxsa_list) {
2519 nla_nest_cancel(skb, rxsc_nest);
2520 nla_nest_cancel(skb, rxsc_list);
2521 goto nla_put_failure;
2522 }
2523
2524 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
2525 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
2526 struct nlattr *rxsa_nest;
2527
2528 if (!rx_sa)
2529 continue;
2530
2531 rxsa_nest = nla_nest_start(skb, k++);
2532 if (!rxsa_nest) {
2533 nla_nest_cancel(skb, rxsa_list);
2534 nla_nest_cancel(skb, rxsc_nest);
2535 nla_nest_cancel(skb, rxsc_list);
2536 goto nla_put_failure;
2537 }
2538
2539 attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
2540 if (!attr) {
2541 nla_nest_cancel(skb, rxsa_list);
2542 nla_nest_cancel(skb, rxsc_nest);
2543 nla_nest_cancel(skb, rxsc_list);
2544 goto nla_put_failure;
2545 }
2546 if (copy_rx_sa_stats(skb, rx_sa->stats)) {
2547 nla_nest_cancel(skb, attr);
2548 nla_nest_cancel(skb, rxsa_list);
2549 nla_nest_cancel(skb, rxsc_nest);
2550 nla_nest_cancel(skb, rxsc_list);
2551 goto nla_put_failure;
2552 }
2553 nla_nest_end(skb, attr);
2554
2555 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2556 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
2557 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
2558 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
2559 nla_nest_cancel(skb, rxsa_nest);
2560 nla_nest_cancel(skb, rxsc_nest);
2561 nla_nest_cancel(skb, rxsc_list);
2562 goto nla_put_failure;
2563 }
2564 nla_nest_end(skb, rxsa_nest);
2565 }
2566
2567 nla_nest_end(skb, rxsa_list);
2568 nla_nest_end(skb, rxsc_nest);
2569 }
2570
2571 nla_nest_end(skb, rxsc_list);
2572
2573 genlmsg_end(skb, hdr);
2574
2575 return 0;
2576
2577 nla_put_failure:
2578 genlmsg_cancel(skb, hdr);
2579 return -EMSGSIZE;
2580 }
2581
2582 static int macsec_generation = 1; /* protected by RTNL */
2583
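/* Walk all macsec devices in the namespace. cb->args[0] remembers how
 * many devices have already been dumped so a multi-message dump can
 * resume, and cb->seq is set to macsec_generation so that
 * genl_dump_check_consistent() can flag dumps interrupted by link
 * creation or deletion.
 */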
2584 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2585 {
2586 struct net *net = sock_net(skb->sk);
2587 struct net_device *dev;
2588 int dev_idx, d;
2589
2590 dev_idx = cb->args[0];
2591
2592 d = 0;
2593 rtnl_lock();
2594
2595 cb->seq = macsec_generation;
2596
2597 for_each_netdev(net, dev) {
2598 struct macsec_secy *secy;
2599
2600 if (d < dev_idx)
2601 goto next;
2602
2603 if (!netif_is_macsec(dev))
2604 goto next;
2605
2606 secy = &macsec_priv(dev)->secy;
2607 if (dump_secy(secy, dev, skb, cb) < 0)
2608 goto done;
2609 next:
2610 d++;
2611 }
2612
2613 done:
2614 rtnl_unlock();
2615 cb->args[0] = d;
2616 return skb->len;
2617 }
2618
2619 static const struct genl_ops macsec_genl_ops[] = {
2620 {
2621 .cmd = MACSEC_CMD_GET_TXSC,
2622 .dumpit = macsec_dump_txsc,
2623 .policy = macsec_genl_policy,
2624 },
2625 {
2626 .cmd = MACSEC_CMD_ADD_RXSC,
2627 .doit = macsec_add_rxsc,
2628 .policy = macsec_genl_policy,
2629 .flags = GENL_ADMIN_PERM,
2630 },
2631 {
2632 .cmd = MACSEC_CMD_DEL_RXSC,
2633 .doit = macsec_del_rxsc,
2634 .policy = macsec_genl_policy,
2635 .flags = GENL_ADMIN_PERM,
2636 },
2637 {
2638 .cmd = MACSEC_CMD_UPD_RXSC,
2639 .doit = macsec_upd_rxsc,
2640 .policy = macsec_genl_policy,
2641 .flags = GENL_ADMIN_PERM,
2642 },
2643 {
2644 .cmd = MACSEC_CMD_ADD_TXSA,
2645 .doit = macsec_add_txsa,
2646 .policy = macsec_genl_policy,
2647 .flags = GENL_ADMIN_PERM,
2648 },
2649 {
2650 .cmd = MACSEC_CMD_DEL_TXSA,
2651 .doit = macsec_del_txsa,
2652 .policy = macsec_genl_policy,
2653 .flags = GENL_ADMIN_PERM,
2654 },
2655 {
2656 .cmd = MACSEC_CMD_UPD_TXSA,
2657 .doit = macsec_upd_txsa,
2658 .policy = macsec_genl_policy,
2659 .flags = GENL_ADMIN_PERM,
2660 },
2661 {
2662 .cmd = MACSEC_CMD_ADD_RXSA,
2663 .doit = macsec_add_rxsa,
2664 .policy = macsec_genl_policy,
2665 .flags = GENL_ADMIN_PERM,
2666 },
2667 {
2668 .cmd = MACSEC_CMD_DEL_RXSA,
2669 .doit = macsec_del_rxsa,
2670 .policy = macsec_genl_policy,
2671 .flags = GENL_ADMIN_PERM,
2672 },
2673 {
2674 .cmd = MACSEC_CMD_UPD_RXSA,
2675 .doit = macsec_upd_rxsa,
2676 .policy = macsec_genl_policy,
2677 .flags = GENL_ADMIN_PERM,
2678 },
2679 };
2680
2681 static struct genl_family macsec_fam __ro_after_init = {
2682 .name = MACSEC_GENL_NAME,
2683 .hdrsize = 0,
2684 .version = MACSEC_GENL_VERSION,
2685 .maxattr = MACSEC_ATTR_MAX,
2686 .netnsok = true,
2687 .module = THIS_MODULE,
2688 .ops = macsec_genl_ops,
2689 .n_ops = ARRAY_SIZE(macsec_genl_ops),
2690 };
2691
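/* Transmit path: frames bypass MACsec entirely while protectFrames is
 * disabled, are dropped while the SecY is not operational, and are
 * otherwise handed to macsec_encrypt(). -EINPROGRESS means the crypto
 * request completes asynchronously and the frame is transmitted from
 * the completion callback instead.
 */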
2692 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2693 struct net_device *dev)
2694 {
2695 struct macsec_dev *macsec = netdev_priv(dev);
2696 struct macsec_secy *secy = &macsec->secy;
2697 struct pcpu_secy_stats *secy_stats;
2698 int ret, len;
2699
2700 /* IEEE 802.1AE-2006 10.5, secure frame generation */
2701 if (!secy->protect_frames) {
2702 secy_stats = this_cpu_ptr(macsec->stats);
2703 u64_stats_update_begin(&secy_stats->syncp);
2704 secy_stats->stats.OutPktsUntagged++;
2705 u64_stats_update_end(&secy_stats->syncp);
2706 skb->dev = macsec->real_dev;
2707 len = skb->len;
2708 ret = dev_queue_xmit(skb);
2709 count_tx(dev, ret, len);
2710 return ret;
2711 }
2712
2713 if (!secy->operational) {
2714 kfree_skb(skb);
2715 dev->stats.tx_dropped++;
2716 return NETDEV_TX_OK;
2717 }
2718
2719 skb = macsec_encrypt(skb, dev);
2720 if (IS_ERR(skb)) {
2721 if (PTR_ERR(skb) != -EINPROGRESS)
2722 dev->stats.tx_dropped++;
2723 return NETDEV_TX_OK;
2724 }
2725
2726 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
2727
2728 macsec_encrypt_finish(skb, dev);
2729 len = skb->len;
2730 ret = dev_queue_xmit(skb);
2731 count_tx(dev, ret, len);
2732 return ret;
2733 }
2734
2735 #define MACSEC_FEATURES \
2736 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
2737 static struct lock_class_key macsec_netdev_addr_lock_key;
2738
2739 static int macsec_dev_init(struct net_device *dev)
2740 {
2741 struct macsec_dev *macsec = macsec_priv(dev);
2742 struct net_device *real_dev = macsec->real_dev;
2743 int err;
2744
2745 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2746 if (!dev->tstats)
2747 return -ENOMEM;
2748
2749 err = gro_cells_init(&macsec->gro_cells, dev);
2750 if (err) {
2751 free_percpu(dev->tstats);
2752 return err;
2753 }
2754
2755 dev->features = real_dev->features & MACSEC_FEATURES;
2756 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
2757
2758 dev->needed_headroom = real_dev->needed_headroom +
2759 MACSEC_NEEDED_HEADROOM;
2760 dev->needed_tailroom = real_dev->needed_tailroom +
2761 MACSEC_NEEDED_TAILROOM;
2762
2763 if (is_zero_ether_addr(dev->dev_addr))
2764 eth_hw_addr_inherit(dev, real_dev);
2765 if (is_zero_ether_addr(dev->broadcast))
2766 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
2767
2768 return 0;
2769 }
2770
2771 static void macsec_dev_uninit(struct net_device *dev)
2772 {
2773 struct macsec_dev *macsec = macsec_priv(dev);
2774
2775 gro_cells_destroy(&macsec->gro_cells);
2776 free_percpu(dev->tstats);
2777 }
2778
2779 static netdev_features_t macsec_fix_features(struct net_device *dev,
2780 netdev_features_t features)
2781 {
2782 struct macsec_dev *macsec = macsec_priv(dev);
2783 struct net_device *real_dev = macsec->real_dev;
2784
2785 features &= (real_dev->features & MACSEC_FEATURES) |
2786 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
2787 features |= NETIF_F_LLTX;
2788
2789 return features;
2790 }
2791
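/* Opening the device programs its unicast address into the lower
 * device and propagates the allmulti/promiscuous state, unwinding in
 * reverse order if any step fails.
 */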
2792 static int macsec_dev_open(struct net_device *dev)
2793 {
2794 struct macsec_dev *macsec = macsec_priv(dev);
2795 struct net_device *real_dev = macsec->real_dev;
2796 int err;
2797
2798 if (!(real_dev->flags & IFF_UP))
2799 return -ENETDOWN;
2800
2801 err = dev_uc_add(real_dev, dev->dev_addr);
2802 if (err < 0)
2803 return err;
2804
2805 if (dev->flags & IFF_ALLMULTI) {
2806 err = dev_set_allmulti(real_dev, 1);
2807 if (err < 0)
2808 goto del_unicast;
2809 }
2810
2811 if (dev->flags & IFF_PROMISC) {
2812 err = dev_set_promiscuity(real_dev, 1);
2813 if (err < 0)
2814 goto clear_allmulti;
2815 }
2816
2817 if (netif_carrier_ok(real_dev))
2818 netif_carrier_on(dev);
2819
2820 return 0;
2821 clear_allmulti:
2822 if (dev->flags & IFF_ALLMULTI)
2823 dev_set_allmulti(real_dev, -1);
2824 del_unicast:
2825 dev_uc_del(real_dev, dev->dev_addr);
2826 netif_carrier_off(dev);
2827 return err;
2828 }
2829
2830 static int macsec_dev_stop(struct net_device *dev)
2831 {
2832 struct macsec_dev *macsec = macsec_priv(dev);
2833 struct net_device *real_dev = macsec->real_dev;
2834
2835 netif_carrier_off(dev);
2836
2837 dev_mc_unsync(real_dev, dev);
2838 dev_uc_unsync(real_dev, dev);
2839
2840 if (dev->flags & IFF_ALLMULTI)
2841 dev_set_allmulti(real_dev, -1);
2842
2843 if (dev->flags & IFF_PROMISC)
2844 dev_set_promiscuity(real_dev, -1);
2845
2846 dev_uc_del(real_dev, dev->dev_addr);
2847
2848 return 0;
2849 }
2850
2851 static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
2852 {
2853 struct net_device *real_dev = macsec_priv(dev)->real_dev;
2854
2855 if (!(dev->flags & IFF_UP))
2856 return;
2857
2858 if (change & IFF_ALLMULTI)
2859 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
2860
2861 if (change & IFF_PROMISC)
2862 dev_set_promiscuity(real_dev,
2863 dev->flags & IFF_PROMISC ? 1 : -1);
2864 }
2865
2866 static void macsec_dev_set_rx_mode(struct net_device *dev)
2867 {
2868 struct net_device *real_dev = macsec_priv(dev)->real_dev;
2869
2870 dev_mc_sync(real_dev, dev);
2871 dev_uc_sync(real_dev, dev);
2872 }
2873
2874 static int macsec_set_mac_address(struct net_device *dev, void *p)
2875 {
2876 struct macsec_dev *macsec = macsec_priv(dev);
2877 struct net_device *real_dev = macsec->real_dev;
2878 struct sockaddr *addr = p;
2879 int err;
2880
2881 if (!is_valid_ether_addr(addr->sa_data))
2882 return -EADDRNOTAVAIL;
2883
2884 if (!(dev->flags & IFF_UP))
2885 goto out;
2886
2887 err = dev_uc_add(real_dev, addr->sa_data);
2888 if (err < 0)
2889 return err;
2890
2891 dev_uc_del(real_dev, dev->dev_addr);
2892
2893 out:
2894 ether_addr_copy(dev->dev_addr, addr->sa_data);
2895 return 0;
2896 }
2897
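/* The usable MTU is the lower device's MTU minus the SecTAG (worst
 * case, with the optional SCI included) and the ICV appended to every
 * protected frame.
 */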
2898 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
2899 {
2900 struct macsec_dev *macsec = macsec_priv(dev);
2901 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
2902
2903 if (macsec->real_dev->mtu - extra < new_mtu)
2904 return -ERANGE;
2905
2906 dev->mtu = new_mtu;
2907
2908 return 0;
2909 }
2910
2911 static void macsec_get_stats64(struct net_device *dev,
2912 struct rtnl_link_stats64 *s)
2913 {
2914 int cpu;
2915
2916 if (!dev->tstats)
2917 return;
2918
2919 for_each_possible_cpu(cpu) {
2920 struct pcpu_sw_netstats *stats;
2921 struct pcpu_sw_netstats tmp;
2922 unsigned int start;
2923
2924 stats = per_cpu_ptr(dev->tstats, cpu);
2925 do {
2926 start = u64_stats_fetch_begin_irq(&stats->syncp);
2927 tmp.rx_packets = stats->rx_packets;
2928 tmp.rx_bytes = stats->rx_bytes;
2929 tmp.tx_packets = stats->tx_packets;
2930 tmp.tx_bytes = stats->tx_bytes;
2931 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2932
2933 s->rx_packets += tmp.rx_packets;
2934 s->rx_bytes += tmp.rx_bytes;
2935 s->tx_packets += tmp.tx_packets;
2936 s->tx_bytes += tmp.tx_bytes;
2937 }
2938
2939 s->rx_dropped = dev->stats.rx_dropped;
2940 s->tx_dropped = dev->stats.tx_dropped;
2941 }
2942
2943 static int macsec_get_iflink(const struct net_device *dev)
2944 {
2945 return macsec_priv(dev)->real_dev->ifindex;
2946 }
2947
2948
2949 static int macsec_get_nest_level(struct net_device *dev)
2950 {
2951 return macsec_priv(dev)->nest_level;
2952 }
2953
2954
2955 static const struct net_device_ops macsec_netdev_ops = {
2956 .ndo_init = macsec_dev_init,
2957 .ndo_uninit = macsec_dev_uninit,
2958 .ndo_open = macsec_dev_open,
2959 .ndo_stop = macsec_dev_stop,
2960 .ndo_fix_features = macsec_fix_features,
2961 .ndo_change_mtu = macsec_change_mtu,
2962 .ndo_set_rx_mode = macsec_dev_set_rx_mode,
2963 .ndo_change_rx_flags = macsec_dev_change_rx_flags,
2964 .ndo_set_mac_address = macsec_set_mac_address,
2965 .ndo_start_xmit = macsec_start_xmit,
2966 .ndo_get_stats64 = macsec_get_stats64,
2967 .ndo_get_iflink = macsec_get_iflink,
2968 .ndo_get_lock_subclass = macsec_get_nest_level,
2969 };
2970
2971 static const struct device_type macsec_type = {
2972 .name = "macsec",
2973 };
2974
2975 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
2976 [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
2977 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
2978 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
2979 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
2980 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
2981 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
2982 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
2983 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
2984 [IFLA_MACSEC_ES] = { .type = NLA_U8 },
2985 [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
2986 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
2987 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
2988 };
2989
2990 static void macsec_free_netdev(struct net_device *dev)
2991 {
2992 struct macsec_dev *macsec = macsec_priv(dev);
2993 struct net_device *real_dev = macsec->real_dev;
2994
2995 free_percpu(macsec->stats);
2996 free_percpu(macsec->secy.tx_sc.stats);
2997
2998 dev_put(real_dev);
2999 free_netdev(dev);
3000 }
3001
3002 static void macsec_setup(struct net_device *dev)
3003 {
3004 ether_setup(dev);
3005 dev->min_mtu = 0;
3006 dev->max_mtu = ETH_MAX_MTU;
3007 dev->priv_flags |= IFF_NO_QUEUE;
3008 dev->netdev_ops = &macsec_netdev_ops;
3009 dev->destructor = macsec_free_netdev;
3010 SET_NETDEV_DEVTYPE(dev, &macsec_type);
3011
3012 eth_zero_addr(dev->broadcast);
3013 }
3014
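/* Apply the IFLA_MACSEC_* attributes shared by newlink and
 * changelink. Moving the encoding SA recomputes whether the SecY is
 * operational, since transmission requires an active encoding SA.
 */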
3015 static void macsec_changelink_common(struct net_device *dev,
3016 struct nlattr *data[])
3017 {
3018 struct macsec_secy *secy;
3019 struct macsec_tx_sc *tx_sc;
3020
3021 secy = &macsec_priv(dev)->secy;
3022 tx_sc = &secy->tx_sc;
3023
3024 if (data[IFLA_MACSEC_ENCODING_SA]) {
3025 struct macsec_tx_sa *tx_sa;
3026
3027 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3028 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3029
3030 secy->operational = tx_sa && tx_sa->active;
3031 }
3032
3033 if (data[IFLA_MACSEC_WINDOW])
3034 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3035
3036 if (data[IFLA_MACSEC_ENCRYPT])
3037 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3038
3039 if (data[IFLA_MACSEC_PROTECT])
3040 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3041
3042 if (data[IFLA_MACSEC_INC_SCI])
3043 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3044
3045 if (data[IFLA_MACSEC_ES])
3046 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3047
3048 if (data[IFLA_MACSEC_SCB])
3049 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3050
3051 if (data[IFLA_MACSEC_REPLAY_PROTECT])
3052 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3053
3054 if (data[IFLA_MACSEC_VALIDATION])
3055 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3056 }
3057
3058 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3059 struct nlattr *data[])
3060 {
3061 if (!data)
3062 return 0;
3063
3064 if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3065 data[IFLA_MACSEC_ICV_LEN] ||
3066 data[IFLA_MACSEC_SCI] ||
3067 data[IFLA_MACSEC_PORT])
3068 return -EINVAL;
3069
3070 macsec_changelink_common(dev, data);
3071
3072 return 0;
3073 }
3074
3075 static void macsec_del_dev(struct macsec_dev *macsec)
3076 {
3077 int i;
3078
3079 while (macsec->secy.rx_sc) {
3080 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3081
3082 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3083 free_rx_sc(rx_sc);
3084 }
3085
3086 for (i = 0; i < MACSEC_NUM_AN; i++) {
3087 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3088
3089 if (sa) {
3090 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3091 clear_tx_sa(sa);
3092 }
3093 }
3094 }
3095
3096 static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3097 {
3098 struct macsec_dev *macsec = macsec_priv(dev);
3099 struct net_device *real_dev = macsec->real_dev;
3100
3101 unregister_netdevice_queue(dev, head);
3102 list_del_rcu(&macsec->secys);
3103 macsec_del_dev(macsec);
3104 netdev_upper_dev_unlink(real_dev, dev);
3105
3106 macsec_generation++;
3107 }
3108
3109 static void macsec_dellink(struct net_device *dev, struct list_head *head)
3110 {
3111 struct macsec_dev *macsec = macsec_priv(dev);
3112 struct net_device *real_dev = macsec->real_dev;
3113 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3114
3115 macsec_common_dellink(dev, head);
3116
3117 if (list_empty(&rxd->secys)) {
3118 netdev_rx_handler_unregister(real_dev);
3119 kfree(rxd);
3120 }
3121 }
3122
3123 static int register_macsec_dev(struct net_device *real_dev,
3124 struct net_device *dev)
3125 {
3126 struct macsec_dev *macsec = macsec_priv(dev);
3127 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3128
3129 if (!rxd) {
3130 int err;
3131
3132 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3133 if (!rxd)
3134 return -ENOMEM;
3135
3136 INIT_LIST_HEAD(&rxd->secys);
3137
3138 err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3139 rxd);
3140 if (err < 0) {
3141 kfree(rxd);
3142 return err;
3143 }
3144 }
3145
3146 list_add_tail_rcu(&macsec->secys, &rxd->secys);
3147 return 0;
3148 }
3149
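/* An SCI identifies exactly one SecY on a given lower device; newlink
 * uses this to refuse a duplicate with -EBUSY.
 */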
3150 static bool sci_exists(struct net_device *dev, sci_t sci)
3151 {
3152 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3153 struct macsec_dev *macsec;
3154
3155 list_for_each_entry(macsec, &rxd->secys, secys) {
3156 if (macsec->secy.sci == sci)
3157 return true;
3158 }
3159
3160 return false;
3161 }
3162
3163 static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3164 {
3165 return make_sci(dev->dev_addr, port);
3166 }
3167
3168 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
3169 {
3170 struct macsec_dev *macsec = macsec_priv(dev);
3171 struct macsec_secy *secy = &macsec->secy;
3172
3173 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
3174 if (!macsec->stats)
3175 return -ENOMEM;
3176
3177 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
3178 if (!secy->tx_sc.stats) {
3179 free_percpu(macsec->stats);
3180 return -ENOMEM;
3181 }
3182
3183 if (sci == MACSEC_UNDEF_SCI)
3184 sci = dev_to_sci(dev, MACSEC_PORT_ES);
3185
3186 secy->netdev = dev;
3187 secy->operational = true;
3188 secy->key_len = DEFAULT_SAK_LEN;
3189 secy->icv_len = icv_len;
3190 secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
3191 secy->protect_frames = true;
3192 secy->replay_protect = false;
3193
3194 secy->sci = sci;
3195 secy->tx_sc.active = true;
3196 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
3197 secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
3198 secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
3199 secy->tx_sc.end_station = false;
3200 secy->tx_sc.scb = false;
3201
3202 return 0;
3203 }
3204
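/* Create a macsec device stacked on the IFLA_LINK device, roughly what
 * iproute2 does for (illustrative):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *
 * Without an explicit SCI or port, the SCI defaults to the device's
 * MAC address combined with the end-station port number.
 */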
3205 static int macsec_newlink(struct net *net, struct net_device *dev,
3206 struct nlattr *tb[], struct nlattr *data[])
3207 {
3208 struct macsec_dev *macsec = macsec_priv(dev);
3209 struct net_device *real_dev;
3210 int err;
3211 sci_t sci;
3212 u8 icv_len = DEFAULT_ICV_LEN;
3213 rx_handler_func_t *rx_handler;
3214
3215 if (!tb[IFLA_LINK])
3216 return -EINVAL;
3217 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
3218 if (!real_dev)
3219 return -ENODEV;
3220
3221 dev->priv_flags |= IFF_MACSEC;
3222
3223 macsec->real_dev = real_dev;
3224
3225 if (data && data[IFLA_MACSEC_ICV_LEN])
3226 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3227 dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
3228
3229 rx_handler = rtnl_dereference(real_dev->rx_handler);
3230 if (rx_handler && rx_handler != macsec_handle_frame)
3231 return -EBUSY;
3232
3233 err = register_netdevice(dev);
3234 if (err < 0)
3235 return err;
3236
3237 dev_hold(real_dev);
3238
3239 macsec->nest_level = dev_get_nest_level(real_dev) + 1;
3240 netdev_lockdep_set_classes(dev);
3241 lockdep_set_class_and_subclass(&dev->addr_list_lock,
3242 &macsec_netdev_addr_lock_key,
3243 macsec_get_nest_level(dev));
3244
3245 err = netdev_upper_dev_link(real_dev, dev);
3246 if (err < 0)
3247 goto unregister;
3248
3249 /* the device must already be registered so that ->ndo_init has
3250 * run and the MAC address is set
3251 */
3252 if (data && data[IFLA_MACSEC_SCI])
3253 sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
3254 else if (data && data[IFLA_MACSEC_PORT])
3255 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
3256 else
3257 sci = dev_to_sci(dev, MACSEC_PORT_ES);
3258
3259 if (rx_handler && sci_exists(real_dev, sci)) {
3260 err = -EBUSY;
3261 goto unlink;
3262 }
3263
3264 err = macsec_add_dev(dev, sci, icv_len);
3265 if (err)
3266 goto unlink;
3267
3268 if (data)
3269 macsec_changelink_common(dev, data);
3270
3271 err = register_macsec_dev(real_dev, dev);
3272 if (err < 0)
3273 goto del_dev;
3274
3275 macsec_generation++;
3276
3277 return 0;
3278
3279 del_dev:
3280 macsec_del_dev(macsec);
3281 unlink:
3282 netdev_upper_dev_unlink(real_dev, dev);
3283 unregister:
3284 unregister_netdevice(dev);
3285 return err;
3286 }
3287
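/* Validate link attributes before any state is touched: only the
 * default cipher suite is accepted, a non-default ICV length is probed
 * by allocating a throwaway AEAD transform, and the ES, SCB and
 * "include SCI" TCI bits are mutually exclusive.
 */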
3288 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3289 {
3290 u64 csid = MACSEC_DEFAULT_CIPHER_ID;
3291 u8 icv_len = DEFAULT_ICV_LEN;
3292 int flag;
3293 bool es, scb, sci;
3294
3295 if (!data)
3296 return 0;
3297
3298 if (data[IFLA_MACSEC_CIPHER_SUITE])
3299 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
3300
3301 if (data[IFLA_MACSEC_ICV_LEN]) {
3302 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3303 if (icv_len != DEFAULT_ICV_LEN) {
3304 char dummy_key[DEFAULT_SAK_LEN] = { 0 };
3305 struct crypto_aead *dummy_tfm;
3306
3307 dummy_tfm = macsec_alloc_tfm(dummy_key,
3308 DEFAULT_SAK_LEN,
3309 icv_len);
3310 if (IS_ERR(dummy_tfm))
3311 return PTR_ERR(dummy_tfm);
3312 crypto_free_aead(dummy_tfm);
3313 }
3314 }
3315
3316 switch (csid) {
3317 case MACSEC_DEFAULT_CIPHER_ID:
3318 case MACSEC_DEFAULT_CIPHER_ALT:
3319 if (icv_len < MACSEC_MIN_ICV_LEN ||
3320 icv_len > MACSEC_STD_ICV_LEN)
3321 return -EINVAL;
3322 break;
3323 default:
3324 return -EINVAL;
3325 }
3326
3327 if (data[IFLA_MACSEC_ENCODING_SA]) {
3328 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
3329 return -EINVAL;
3330 }
3331
3332 for (flag = IFLA_MACSEC_ENCODING_SA + 1;
3333 flag < IFLA_MACSEC_VALIDATION;
3334 flag++) {
3335 if (data[flag]) {
3336 if (nla_get_u8(data[flag]) > 1)
3337 return -EINVAL;
3338 }
3339 }
3340
3341 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
3342 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
3343 scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
3344
3345 if ((sci && (scb || es)) || (scb && es))
3346 return -EINVAL;
3347
3348 if (data[IFLA_MACSEC_VALIDATION] &&
3349 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
3350 return -EINVAL;
3351
3352 if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
3353 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
3354 !data[IFLA_MACSEC_WINDOW])
3355 return -EINVAL;
3356
3357 return 0;
3358 }
3359
3360 static struct net *macsec_get_link_net(const struct net_device *dev)
3361 {
3362 return dev_net(macsec_priv(dev)->real_dev);
3363 }
3364
3365 static size_t macsec_get_size(const struct net_device *dev)
3366 {
3367 return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
3368 nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
3369 nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
3370 nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
3371 nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
3372 nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
3373 nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
3374 nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
3375 nla_total_size(1) + /* IFLA_MACSEC_ES */
3376 nla_total_size(1) + /* IFLA_MACSEC_SCB */
3377 nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
3378 nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
3379 0;
3380 }
3381
3382 static int macsec_fill_info(struct sk_buff *skb,
3383 const struct net_device *dev)
3384 {
3385 struct macsec_secy *secy = &macsec_priv(dev)->secy;
3386 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3387
3388 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
3389 IFLA_MACSEC_PAD) ||
3390 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
3391 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
3392 MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
3393 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
3394 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
3395 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
3396 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
3397 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
3398 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
3399 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
3400 nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
3401 0)
3402 goto nla_put_failure;
3403
3404 if (secy->replay_protect) {
3405 if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
3406 goto nla_put_failure;
3407 }
3408
3409 return 0;
3410
3411 nla_put_failure:
3412 return -EMSGSIZE;
3413 }
3414
3415 static struct rtnl_link_ops macsec_link_ops __read_mostly = {
3416 .kind = "macsec",
3417 .priv_size = sizeof(struct macsec_dev),
3418 .maxtype = IFLA_MACSEC_MAX,
3419 .policy = macsec_rtnl_policy,
3420 .setup = macsec_setup,
3421 .validate = macsec_validate_attr,
3422 .newlink = macsec_newlink,
3423 .changelink = macsec_changelink,
3424 .dellink = macsec_dellink,
3425 .get_size = macsec_get_size,
3426 .fill_info = macsec_fill_info,
3427 .get_link_net = macsec_get_link_net,
3428 };
3429
3430 static bool is_macsec_master(struct net_device *dev)
3431 {
3432 return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
3433 }
3434
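/* React to events on the lower device: tear down every macsec device
 * stacked on it when it unregisters, and clamp their MTUs when its own
 * MTU shrinks below what the MACsec overhead allows.
 */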
3435 static int macsec_notify(struct notifier_block *this, unsigned long event,
3436 void *ptr)
3437 {
3438 struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
3439 LIST_HEAD(head);
3440
3441 if (!is_macsec_master(real_dev))
3442 return NOTIFY_DONE;
3443
3444 switch (event) {
3445 case NETDEV_UNREGISTER: {
3446 struct macsec_dev *m, *n;
3447 struct macsec_rxh_data *rxd;
3448
3449 rxd = macsec_data_rtnl(real_dev);
3450 list_for_each_entry_safe(m, n, &rxd->secys, secys) {
3451 macsec_common_dellink(m->secy.netdev, &head);
3452 }
3453
3454 netdev_rx_handler_unregister(real_dev);
3455 kfree(rxd);
3456
3457 unregister_netdevice_many(&head);
3458 break;
3459 }
3460 case NETDEV_CHANGEMTU: {
3461 struct macsec_dev *m;
3462 struct macsec_rxh_data *rxd;
3463
3464 rxd = macsec_data_rtnl(real_dev);
3465 list_for_each_entry(m, &rxd->secys, secys) {
3466 struct net_device *dev = m->secy.netdev;
3467 unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
3468 macsec_extra_len(true));
3469
3470 if (dev->mtu > mtu)
3471 dev_set_mtu(dev, mtu);
3472 }
3473 }
3474 }
3475
3476 return NOTIFY_OK;
3477 }
3478
3479 static struct notifier_block macsec_notifier = {
3480 .notifier_call = macsec_notify,
3481 };
3482
3483 static int __init macsec_init(void)
3484 {
3485 int err;
3486
3487 pr_info("MACsec IEEE 802.1AE\n");
3488 err = register_netdevice_notifier(&macsec_notifier);
3489 if (err)
3490 return err;
3491
3492 err = rtnl_link_register(&macsec_link_ops);
3493 if (err)
3494 goto notifier;
3495
3496 err = genl_register_family(&macsec_fam);
3497 if (err)
3498 goto rtnl;
3499
3500 return 0;
3501
3502 rtnl:
3503 rtnl_link_unregister(&macsec_link_ops);
3504 notifier:
3505 unregister_netdevice_notifier(&macsec_notifier);
3506 return err;
3507 }
3508
3509 static void __exit macsec_exit(void)
3510 {
3511 genl_unregister_family(&macsec_fam);
3512 rtnl_link_unregister(&macsec_link_ops);
3513 unregister_netdevice_notifier(&macsec_notifier);
3514 rcu_barrier();
3515 }
3516
3517 module_init(macsec_init);
3518 module_exit(macsec_exit);
3519
3520 MODULE_ALIAS_RTNL_LINK("macsec");
3521
3522 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
3523 MODULE_LICENSE("GPL v2");