1// SPDX-License-Identifier: GPL-2.0
2
3/* net/sched/sch_taprio.c Time Aware Priority Scheduler
4 *
5 * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com>
6 *
7 */
8
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/list.h>
14#include <linux/errno.h>
15#include <linux/skbuff.h>
16#include <linux/math64.h>
17#include <linux/module.h>
18#include <linux/spinlock.h>
19#include <linux/rcupdate.h>
20#include <net/netlink.h>
21#include <net/pkt_sched.h>
22#include <net/pkt_cls.h>
23#include <net/sch_generic.h>
24#include <net/sock.h>
25#include <net/tcp.h>
26
27static LIST_HEAD(taprio_list);
28static DEFINE_SPINLOCK(taprio_list_lock);
29
30#define TAPRIO_ALL_GATES_OPEN -1
31
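/* Only the txtime-assist flag is accepted here; any other flag bit makes
 * the supplied configuration invalid.
 */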
32#define FLAGS_VALID(flags) (!((flags) & ~TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST))
33#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
34
35struct sched_entry {
36 struct list_head list;
37
38 /* The instant that this entry "closes" and the next one
39 * should open, the qdisc will make some effort so that no
40 * packet leaves after this time.
41 */
42 ktime_t close_time;
43 ktime_t next_txtime;
44 atomic_t budget;
45 int index;
46 u32 gate_mask;
47 u32 interval;
48 u8 command;
49};
50
51struct sched_gate_list {
52 struct rcu_head rcu;
53 struct list_head entries;
54 size_t num_entries;
55 ktime_t cycle_close_time;
56 s64 cycle_time;
57 s64 cycle_time_extension;
58 s64 base_time;
59};
60
61struct taprio_sched {
62 struct Qdisc **qdiscs;
63 struct Qdisc *root;
64 u32 flags;
65 enum tk_offsets tk_offset;
66 int clockid;
67 atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
68 * speeds it's sub-nanoseconds per byte
69 */
70
71 /* Protects the update side of the RCU protected current_entry */
72 spinlock_t current_entry_lock;
73 struct sched_entry __rcu *current_entry;
74 struct sched_gate_list __rcu *oper_sched;
75 struct sched_gate_list __rcu *admin_sched;
76 struct hrtimer advance_timer;
77 struct list_head taprio_list;
78 u32 txtime_delay;
79};
80
81static ktime_t sched_base_time(const struct sched_gate_list *sched)
82{
83 if (!sched)
84 return KTIME_MAX;
85
86 return ns_to_ktime(sched->base_time);
87}
88
89static ktime_t taprio_get_time(struct taprio_sched *q)
90{
91 ktime_t mono = ktime_get();
92
93 switch (q->tk_offset) {
94 case TK_OFFS_MAX:
95 return mono;
96 default:
97 return ktime_mono_to_any(mono, q->tk_offset);
98 }
99
100 return KTIME_MAX;
101}
102
103static void taprio_free_sched_cb(struct rcu_head *head)
104{
105 struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
106 struct sched_entry *entry, *n;
107
108 if (!sched)
109 return;
110
111 list_for_each_entry_safe(entry, n, &sched->entries, list) {
112 list_del(&entry->list);
113 kfree(entry);
114 }
115
116 kfree(sched);
117}
118
119static void switch_schedules(struct taprio_sched *q,
120 struct sched_gate_list **admin,
121 struct sched_gate_list **oper)
122{
123 rcu_assign_pointer(q->oper_sched, *admin);
124 rcu_assign_pointer(q->admin_sched, NULL);
125
126 if (*oper)
127 call_rcu(&(*oper)->rcu, taprio_free_sched_cb);
128
129 *oper = *admin;
130 *admin = NULL;
131}
132
133/* Get how much time has already elapsed in the current cycle. */
134static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
135{
136 ktime_t time_since_sched_start;
137 s32 time_elapsed;
138
139 time_since_sched_start = ktime_sub(time, sched->base_time);
140 div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
141
142 return time_elapsed;
143}
144
145static ktime_t get_interval_end_time(struct sched_gate_list *sched,
146 struct sched_gate_list *admin,
147 struct sched_entry *entry,
148 ktime_t intv_start)
149{
150 s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
151 ktime_t intv_end, cycle_ext_end, cycle_end;
152
153 cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
154 intv_end = ktime_add_ns(intv_start, entry->interval);
155 cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
156
157 if (ktime_before(intv_end, cycle_end))
158 return intv_end;
159 else if (admin && admin != sched &&
160 ktime_after(admin->base_time, cycle_end) &&
161 ktime_before(admin->base_time, cycle_ext_end))
162 return admin->base_time;
163 else
164 return cycle_end;
165}
166
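/* Convert a packet length in bytes to its transmission duration in
 * nanoseconds at the current link speed (picos_per_byte / 1000).
 */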
167static int length_to_duration(struct taprio_sched *q, int len)
168{
169 return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
170}
171
172/* Returns the entry corresponding to next available interval. If
173 * validate_interval is set, it only validates whether the timestamp occurs
174 * when the gate corresponding to the skb's traffic class is open.
175 */
176static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
177 struct Qdisc *sch,
178 struct sched_gate_list *sched,
179 struct sched_gate_list *admin,
180 ktime_t time,
181 ktime_t *interval_start,
182 ktime_t *interval_end,
183 bool validate_interval)
184{
185 ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
186 ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
187 struct sched_entry *entry = NULL, *entry_found = NULL;
188 struct taprio_sched *q = qdisc_priv(sch);
189 struct net_device *dev = qdisc_dev(sch);
190 bool entry_available = false;
191 s32 cycle_elapsed;
192 int tc, n;
193
194 tc = netdev_get_prio_tc_map(dev, skb->priority);
195 packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));
196
197 *interval_start = 0;
198 *interval_end = 0;
199
200 if (!sched)
201 return NULL;
202
203 cycle = sched->cycle_time;
204 cycle_elapsed = get_cycle_time_elapsed(sched, time);
205 curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
206 cycle_end = ktime_add_ns(curr_intv_end, cycle);
207
208 list_for_each_entry(entry, &sched->entries, list) {
209 curr_intv_start = curr_intv_end;
210 curr_intv_end = get_interval_end_time(sched, admin, entry,
211 curr_intv_start);
212
213 if (ktime_after(curr_intv_start, cycle_end))
214 break;
215
216 if (!(entry->gate_mask & BIT(tc)) ||
217 packet_transmit_time > entry->interval)
218 continue;
219
220 txtime = entry->next_txtime;
221
222 if (ktime_before(txtime, time) || validate_interval) {
223 transmit_end_time = ktime_add_ns(time, packet_transmit_time);
224 if ((ktime_before(curr_intv_start, time) &&
225 ktime_before(transmit_end_time, curr_intv_end)) ||
226 (ktime_after(curr_intv_start, time) && !validate_interval)) {
227 entry_found = entry;
228 *interval_start = curr_intv_start;
229 *interval_end = curr_intv_end;
230 break;
231 } else if (!entry_available && !validate_interval) {
232 /* Here, we are just trying to find out the
233 * first available interval in the next cycle.
234 */
235 entry_available = 1;
236 entry_found = entry;
237 *interval_start = ktime_add_ns(curr_intv_start, cycle);
238 *interval_end = ktime_add_ns(curr_intv_end, cycle);
239 }
240 } else if (ktime_before(txtime, earliest_txtime) &&
241 !entry_available) {
242 earliest_txtime = txtime;
243 entry_found = entry;
244 n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
245 *interval_start = ktime_add(curr_intv_start, n * cycle);
246 *interval_end = ktime_add(curr_intv_end, n * cycle);
247 }
248 }
249
250 return entry_found;
251}
252
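/* Check whether the skb's txtime falls inside an interval in which the
 * gate for its traffic class is open (validate_interval == true).
 */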
253static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
254{
255 struct taprio_sched *q = qdisc_priv(sch);
256 struct sched_gate_list *sched, *admin;
257 ktime_t interval_start, interval_end;
258 struct sched_entry *entry;
259
260 rcu_read_lock();
261 sched = rcu_dereference(q->oper_sched);
262 admin = rcu_dereference(q->admin_sched);
263
264 entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
265 &interval_start, &interval_end, true);
266 rcu_read_unlock();
267
268 return entry;
269}
270
271/* This returns the tstamp value set by TCP in terms of the set clock. */
272static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
273{
274 unsigned int offset = skb_network_offset(skb);
275 const struct ipv6hdr *ipv6h;
276 const struct iphdr *iph;
277 struct ipv6hdr _ipv6h;
278
279 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
280 if (!ipv6h)
281 return 0;
282
283 if (ipv6h->version == 4) {
284 iph = (struct iphdr *)ipv6h;
285 offset += iph->ihl * 4;
286
287 /* special-case 6in4 tunnelling, as that is a common way to get
288 * v6 connectivity in the home
289 */
290 if (iph->protocol == IPPROTO_IPV6) {
291 ipv6h = skb_header_pointer(skb, offset,
292 sizeof(_ipv6h), &_ipv6h);
293
294 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
295 return 0;
296 } else if (iph->protocol != IPPROTO_TCP) {
297 return 0;
298 }
299 } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
300 return 0;
301 }
302
303 return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
304}
305
306/* There are a few scenarios where we will have to modify the txtime from
307 * what is read from next_txtime in sched_entry. They are:
308 * 1. If txtime is in the past,
309 * a. The gate for the traffic class is currently open and the packet can
310 * be transmitted before it closes: schedule the packet right away.
311 * b. If the gate corresponding to the traffic class is going to open later
312 * in the cycle, set the txtime of packet to the interval start.
313 * 2. If txtime is in the future, there are packets corresponding to the
314 * current traffic class waiting to be transmitted. So, the following
315 * possibilities exist:
316 * a. We can transmit the packet before the window containing the txtime
317 * closes.
318 * b. The window might close before the transmission can be completed
319 * successfully. So, schedule the packet in the next open window.
320 */
321static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
322{
323 ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
324 struct taprio_sched *q = qdisc_priv(sch);
325 struct sched_gate_list *sched, *admin;
326 ktime_t minimum_time, now, txtime;
327 int len, packet_transmit_time;
328 struct sched_entry *entry;
329 bool sched_changed;
330
331 now = taprio_get_time(q);
332 minimum_time = ktime_add_ns(now, q->txtime_delay);
333
334 tcp_tstamp = get_tcp_tstamp(q, skb);
335 minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);
336
337 rcu_read_lock();
338 admin = rcu_dereference(q->admin_sched);
339 sched = rcu_dereference(q->oper_sched);
340 if (admin && ktime_after(minimum_time, admin->base_time))
341 switch_schedules(q, &admin, &sched);
342
343 /* Until the schedule starts, all the queues are open */
344 if (!sched || ktime_before(minimum_time, sched->base_time)) {
345 txtime = minimum_time;
346 goto done;
347 }
348
349 len = qdisc_pkt_len(skb);
350 packet_transmit_time = length_to_duration(q, len);
351
352 do {
353 sched_changed = 0;
354
355 entry = find_entry_to_transmit(skb, sch, sched, admin,
356 minimum_time,
357 &interval_start, &interval_end,
358 false);
359 if (!entry) {
360 txtime = 0;
361 goto done;
362 }
363
364 txtime = entry->next_txtime;
365 txtime = max_t(ktime_t, txtime, minimum_time);
366 txtime = max_t(ktime_t, txtime, interval_start);
367
368 if (admin && admin != sched &&
369 ktime_after(txtime, admin->base_time)) {
370 sched = admin;
371 sched_changed = 1;
372 continue;
373 }
374
375 transmit_end_time = ktime_add(txtime, packet_transmit_time);
376 minimum_time = transmit_end_time;
377
378 /* Update the txtime of the current entry to the next time its
379 * interval starts.
380 */
381 if (ktime_after(transmit_end_time, interval_end))
382 entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
383 } while (sched_changed || ktime_after(transmit_end_time, interval_end));
384
385 entry->next_txtime = transmit_end_time;
386
387done:
388 rcu_read_unlock();
389 return txtime;
390}
391
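/* On enqueue, packets from SO_TXTIME sockets have their user-supplied
 * transmission time validated against the gate schedule; otherwise, in
 * txtime-assist mode the qdisc computes and stamps a txtime itself.
 */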
392static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
393 struct sk_buff **to_free)
394{
395 struct taprio_sched *q = qdisc_priv(sch);
396 struct Qdisc *child;
397 int queue;
398
399 queue = skb_get_queue_mapping(skb);
400
401 child = q->qdiscs[queue];
402 if (unlikely(!child))
403 return qdisc_drop(skb, sch, to_free);
404
405 if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
406 if (!is_valid_interval(skb, sch))
407 return qdisc_drop(skb, sch, to_free);
408 } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
409 skb->tstamp = get_packet_txtime(skb, sch);
410 if (!skb->tstamp)
411 return qdisc_drop(skb, sch, to_free);
412 }
413
414 qdisc_qstats_backlog_inc(sch, skb);
415 sch->q.qlen++;
416
417 return qdisc_enqueue(skb, child, to_free);
418}
419
420static struct sk_buff *taprio_peek(struct Qdisc *sch)
421{
422 struct taprio_sched *q = qdisc_priv(sch);
423 struct net_device *dev = qdisc_dev(sch);
424 struct sched_entry *entry;
425 struct sk_buff *skb;
426 u32 gate_mask;
427 int i;
428
429 rcu_read_lock();
430 entry = rcu_dereference(q->current_entry);
431 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
432 rcu_read_unlock();
433
434 if (!gate_mask)
435 return NULL;
436
437 for (i = 0; i < dev->num_tx_queues; i++) {
438 struct Qdisc *child = q->qdiscs[i];
439 int prio;
440 u8 tc;
441
442 if (unlikely(!child))
443 continue;
444
445 skb = child->ops->peek(child);
446 if (!skb)
447 continue;
448
449 if (TXTIME_ASSIST_IS_ENABLED(q->flags))
450 return skb;
451
452 prio = skb->priority;
453 tc = netdev_get_prio_tc_map(dev, prio);
454
455 if (!(gate_mask & BIT(tc)))
456 continue;
457
458 return skb;
459 }
460
461 return NULL;
462}
463
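/* Refill the entry's byte budget: the number of bytes that can be sent
 * during its interval at the current link speed
 * (interval[ns] * 1000 / picos_per_byte).
 */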
464static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
465{
466 atomic_set(&entry->budget,
467 div64_u64((u64)entry->interval * 1000,
468 atomic64_read(&q->picos_per_byte)));
469}
470
471static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
472{
473 struct taprio_sched *q = qdisc_priv(sch);
474 struct net_device *dev = qdisc_dev(sch);
475 struct sk_buff *skb = NULL;
476 struct sched_entry *entry;
477 u32 gate_mask;
478 int i;
479
480 if (atomic64_read(&q->picos_per_byte) == -1) {
481 WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
482 return NULL;
483 }
484
485 rcu_read_lock();
486 entry = rcu_dereference(q->current_entry);
487 /* if there's no entry, it means that the schedule didn't
488 * start yet, so force all gates to be open, this is in
489 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
490 * "AdminGateStates"
491 */
492 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
493
494 if (!gate_mask)
495 goto done;
496
497 for (i = 0; i < dev->num_tx_queues; i++) {
498 struct Qdisc *child = q->qdiscs[i];
499 ktime_t guard;
500 int prio;
501 int len;
502 u8 tc;
503
504 if (unlikely(!child))
505 continue;
506
507 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
508 skb = child->ops->dequeue(child);
509 if (!skb)
510 continue;
511 goto skb_found;
512 }
513
514 skb = child->ops->peek(child);
515 if (!skb)
516 continue;
517
518 prio = skb->priority;
519 tc = netdev_get_prio_tc_map(dev, prio);
520
521 if (!(gate_mask & BIT(tc)))
522 continue;
523
524 len = qdisc_pkt_len(skb);
525 guard = ktime_add_ns(taprio_get_time(q),
526 length_to_duration(q, len));
527
528 /* In the case that there's no gate entry, there's no
529 * guard band ...
530 */
531 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
532 ktime_after(guard, entry->close_time))
533 continue;
534
535 /* ... and no budget. */
536 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
537 atomic_sub_return(len, &entry->budget) < 0)
538 continue;
539
540 skb = child->ops->dequeue(child);
541 if (unlikely(!skb))
542 goto done;
543
544skb_found:
545 qdisc_bstats_update(sch, skb);
546 qdisc_qstats_backlog_dec(sch, skb);
547 sch->q.qlen--;
548
549 goto done;
550 }
551
552done:
553 rcu_read_unlock();
554
555 return skb;
556}
557
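/* The cycle restarts after the last entry, or earlier when the running
 * entry closes exactly at the cycle close time (truncated cycle).
 */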
558static bool should_restart_cycle(const struct sched_gate_list *oper,
559 const struct sched_entry *entry)
560{
561 if (list_is_last(&entry->list, &oper->entries))
562 return true;
563
564 if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
565 return true;
566
567 return false;
568}
569
570static bool should_change_schedules(const struct sched_gate_list *admin,
571 const struct sched_gate_list *oper,
572 ktime_t close_time)
573{
574 ktime_t next_base_time, extension_time;
575
576 if (!admin)
577 return false;
578
579 next_base_time = sched_base_time(admin);
580
581 /* This is the simple case, the close_time would fall after
582 * the next schedule base_time.
583 */
584 if (ktime_compare(next_base_time, close_time) <= 0)
585 return true;
586
587 /* This is the cycle_time_extension case, if the close_time
588 * plus the amount that can be extended would fall after the
589 * next schedule base_time, we can extend the current schedule
590 * for that amount.
591 */
592 extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);
593
594 /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
595 * how precisely the extension should be made. So after
596 * conformance testing, this logic may change.
597 */
598 if (ktime_compare(next_base_time, extension_time) <= 0)
599 return true;
600
601 return false;
602}
603
604static enum hrtimer_restart advance_sched(struct hrtimer *timer)
605{
606 struct taprio_sched *q = container_of(timer, struct taprio_sched,
607 advance_timer);
608 struct sched_gate_list *oper, *admin;
609 struct sched_entry *entry, *next;
610 struct Qdisc *sch = q->root;
611 ktime_t close_time;
612
613 spin_lock(&q->current_entry_lock);
614 entry = rcu_dereference_protected(q->current_entry,
615 lockdep_is_held(&q->current_entry_lock));
616 oper = rcu_dereference_protected(q->oper_sched,
617 lockdep_is_held(&q->current_entry_lock));
618 admin = rcu_dereference_protected(q->admin_sched,
619 lockdep_is_held(&q->current_entry_lock));
620
621 if (!oper)
622 switch_schedules(q, &admin, &oper);
623
624 /* This can happen in two cases: 1. this is the very first run
625 * of this function (i.e. we weren't running any schedule
626 * previously); 2. The previous schedule just ended. The first
627 * entry of all schedules is pre-calculated during the
628 * schedule initialization.
5a781ccb 629 */
630 if (unlikely(!entry || entry->close_time == oper->base_time)) {
631 next = list_first_entry(&oper->entries, struct sched_entry,
632 list);
633 close_time = next->close_time;
634 goto first_run;
635 }
636
637 if (should_restart_cycle(oper, entry)) {
638 next = list_first_entry(&oper->entries, struct sched_entry,
639 list);
640 oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
641 oper->cycle_time);
642 } else {
643 next = list_next_entry(entry, list);
644 }
645
646 close_time = ktime_add_ns(entry->close_time, next->interval);
647 close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
648
649 if (should_change_schedules(admin, oper, close_time)) {
650 /* Set things so the next time this runs, the new
651 * schedule runs.
652 */
653 close_time = sched_base_time(admin);
654 switch_schedules(q, &admin, &oper);
655 }
656
657 next->close_time = close_time;
658 taprio_set_budget(q, next);
659
660first_run:
661 rcu_assign_pointer(q->current_entry, next);
662 spin_unlock(&q->current_entry_lock);
663
664 hrtimer_set_expires(&q->advance_timer, close_time);
665
666 rcu_read_lock();
667 __netif_schedule(sch);
668 rcu_read_unlock();
669
670 return HRTIMER_RESTART;
671}
672
673static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
674 [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
675 [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
676 [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
677 [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
678};
679
680static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
681 [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
682};
683
684static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
685 [TCA_TAPRIO_ATTR_PRIOMAP] = {
686 .len = sizeof(struct tc_mqprio_qopt)
687 },
688 [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
689 [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
690 [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
691 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
692 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
693 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
694};
695
696static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
697 struct netlink_ext_ack *extack)
698{
699 u32 interval = 0;
700
701 if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
702 entry->command = nla_get_u8(
703 tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
704
705 if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
706 entry->gate_mask = nla_get_u32(
707 tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
708
709 if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
710 interval = nla_get_u32(
711 tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
712
713 if (interval == 0) {
714 NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
715 return -EINVAL;
716 }
717
718 entry->interval = interval;
719
720 return 0;
721}
722
723static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
724 int index, struct netlink_ext_ack *extack)
725{
726 struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
727 int err;
728
729 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
730 entry_policy, NULL);
731 if (err < 0) {
732 NL_SET_ERR_MSG(extack, "Could not parse nested entry");
733 return -EINVAL;
734 }
735
736 entry->index = index;
737
738 return fill_sched_entry(tb, entry, extack);
739}
740
741static int parse_sched_list(struct nlattr *list,
742 struct sched_gate_list *sched,
743 struct netlink_ext_ack *extack)
744{
745 struct nlattr *n;
746 int err, rem;
747 int i = 0;
748
749 if (!list)
750 return -EINVAL;
751
752 nla_for_each_nested(n, list, rem) {
753 struct sched_entry *entry;
754
755 if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
756 NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
757 continue;
758 }
759
760 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
761 if (!entry) {
762 NL_SET_ERR_MSG(extack, "Not enough memory for entry");
763 return -ENOMEM;
764 }
765
766 err = parse_sched_entry(n, entry, i, extack);
767 if (err < 0) {
768 kfree(entry);
769 return err;
770 }
771
772 list_add_tail(&entry->list, &sched->entries);
773 i++;
774 }
775
776 sched->num_entries = i;
777
778 return i;
779}
780
781static int parse_taprio_schedule(struct nlattr **tb,
782 struct sched_gate_list *new,
783 struct netlink_ext_ack *extack)
784{
785 int err = 0;
786
787 if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
788 NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
789 return -ENOTSUPP;
790 }
791
792 if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
793 new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
794
795 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
796 new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
797
798 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
799 new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
800
801 if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
802 err = parse_sched_list(
803 tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
804 if (err < 0)
805 return err;
806
807 if (!new->cycle_time) {
808 struct sched_entry *entry;
809 ktime_t cycle = 0;
810
811 list_for_each_entry(entry, &new->entries, list)
812 cycle = ktime_add_ns(cycle, entry->interval);
813 new->cycle_time = cycle;
814 }
815
816 return 0;
817}
818
819static int taprio_parse_mqprio_opt(struct net_device *dev,
820 struct tc_mqprio_qopt *qopt,
821 struct netlink_ext_ack *extack,
822 u32 taprio_flags)
823{
824 int i, j;
825
826 if (!qopt && !dev->num_tc) {
827 NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
828 return -EINVAL;
829 }
830
831 /* If num_tc is already set, it means that the user already
832 * configured the mqprio part
833 */
834 if (dev->num_tc)
835 return 0;
836
837 /* Verify num_tc is not out of max range */
838 if (qopt->num_tc > TC_MAX_QUEUE) {
839 NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
840 return -EINVAL;
841 }
842
843 /* taprio imposes that traffic classes map 1:n to tx queues */
844 if (qopt->num_tc > dev->num_tx_queues) {
845 NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
846 return -EINVAL;
847 }
848
849 /* Verify priority mapping uses valid tcs */
850 for (i = 0; i < TC_BITMASK + 1; i++) {
851 if (qopt->prio_tc_map[i] >= qopt->num_tc) {
852 NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
853 return -EINVAL;
854 }
855 }
856
857 for (i = 0; i < qopt->num_tc; i++) {
858 unsigned int last = qopt->offset[i] + qopt->count[i];
859
860 /* Verify the queue count is within the tx range; a count equal
861 * to real_num_tx_queues indicates the last queue is in use.
862 */
863 if (qopt->offset[i] >= dev->num_tx_queues ||
864 !qopt->count[i] ||
865 last > dev->real_num_tx_queues) {
866 NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
867 return -EINVAL;
868 }
869
870 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
871 continue;
872
873 /* Verify that the offset and counts do not overlap */
874 for (j = i + 1; j < qopt->num_tc; j++) {
875 if (last > qopt->offset[j]) {
876 NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
877 return -EINVAL;
878 }
879 }
880 }
881
882 return 0;
883}
884
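/* Compute when the schedule should (re)start: the base time itself if it
 * is still in the future, otherwise the beginning of the next cycle after
 * 'now'.
 */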
885static int taprio_get_start_time(struct Qdisc *sch,
886 struct sched_gate_list *sched,
887 ktime_t *start)
888{
889 struct taprio_sched *q = qdisc_priv(sch);
890 ktime_t now, base, cycle;
891 s64 n;
892
893 base = sched_base_time(sched);
894 now = taprio_get_time(q);
895
896 if (ktime_after(base, now)) {
897 *start = base;
898 return 0;
899 }
900
901 cycle = sched->cycle_time;
902
903 /* The qdisc is expected to have at least one sched_entry. Moreover,
904 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
905 * something went really wrong. In that case, we should warn about this
906 * inconsistent state and return error.
907 */
908 if (WARN_ON(!cycle))
909 return -EFAULT;
910
911 /* Schedule the start time for the beginning of the next
912 * cycle.
913 */
914 n = div64_s64(ktime_sub_ns(now, base), cycle);
915 *start = ktime_add_ns(base, (n + 1) * cycle);
916 return 0;
917}
918
919static void setup_first_close_time(struct taprio_sched *q,
920 struct sched_gate_list *sched, ktime_t base)
921{
922 struct sched_entry *first;
923 ktime_t cycle;
924
925 first = list_first_entry(&sched->entries,
926 struct sched_entry, list);
927
928 cycle = sched->cycle_time;
929
930 /* FIXME: find a better place to do this */
931 sched->cycle_close_time = ktime_add_ns(base, cycle);
932
933 first->close_time = ktime_add_ns(base, first->interval);
934 taprio_set_budget(q, first);
935 rcu_assign_pointer(q->current_entry, NULL);
936}
937
938static void taprio_start_sched(struct Qdisc *sch,
939 ktime_t start, struct sched_gate_list *new)
940{
941 struct taprio_sched *q = qdisc_priv(sch);
942 ktime_t expires;
943
944 expires = hrtimer_get_expires(&q->advance_timer);
945 if (expires == 0)
946 expires = KTIME_MAX;
947
948 /* If the new schedule starts before the next expiration, we
949 * reprogram it to the earliest one, so we change the admin
950 * schedule to the operational one at the right time.
951 */
952 start = min_t(ktime_t, start, expires);
953
954 hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
955}
956
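/* Cache the transmission cost of one byte, in picoseconds, derived from
 * the link speed reported by ethtool (8 * 10^12 / speed in bit/s), or -1
 * if the speed is unknown.
 */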
957static void taprio_set_picos_per_byte(struct net_device *dev,
958 struct taprio_sched *q)
959{
960 struct ethtool_link_ksettings ecmd;
961 int picos_per_byte = -1;
962
963 if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
964 ecmd.base.speed != SPEED_UNKNOWN)
965 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
966 ecmd.base.speed * 1000 * 1000);
967
968 atomic64_set(&q->picos_per_byte, picos_per_byte);
969 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
970 dev->name, (long long)atomic64_read(&q->picos_per_byte),
971 ecmd.base.speed);
972}
973
974static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
975 void *ptr)
976{
977 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
978 struct net_device *qdev;
979 struct taprio_sched *q;
980 bool found = false;
981
982 ASSERT_RTNL();
983
984 if (event != NETDEV_UP && event != NETDEV_CHANGE)
985 return NOTIFY_DONE;
986
987 spin_lock(&taprio_list_lock);
988 list_for_each_entry(q, &taprio_list, taprio_list) {
989 qdev = qdisc_dev(q->root);
990 if (qdev == dev) {
991 found = true;
992 break;
993 }
994 }
995 spin_unlock(&taprio_list_lock);
996
997 if (found)
998 taprio_set_picos_per_byte(dev, q);
999
1000 return NOTIFY_DONE;
1001}
1002
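/* Seed each entry's next_txtime with its offset from the schedule base
 * time, i.e. the first instant its interval begins.
 */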
1003static void setup_txtime(struct taprio_sched *q,
1004 struct sched_gate_list *sched, ktime_t base)
1005{
1006 struct sched_entry *entry;
1007 u32 interval = 0;
1008
1009 list_for_each_entry(entry, &sched->entries, list) {
1010 entry->next_txtime = ktime_add_ns(base, interval);
1011 interval += entry->interval;
1012 }
1013}
1014
1015static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
1016 struct netlink_ext_ack *extack)
1017{
1018 struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
1019 struct sched_gate_list *oper, *admin, *new_admin;
1020 struct taprio_sched *q = qdisc_priv(sch);
1021 struct net_device *dev = qdisc_dev(sch);
1022 struct tc_mqprio_qopt *mqprio = NULL;
1023 u32 taprio_flags = 0;
1024 int i, err, clockid;
1025 unsigned long flags;
1026 ktime_t start;
1027
1028 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
1029 taprio_policy, extack);
1030 if (err < 0)
1031 return err;
1032
1033 if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
1034 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
1035
1036 if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
1037 taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);
1038
1039 if (q->flags != 0 && q->flags != taprio_flags) {
1040 NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1041 return -EOPNOTSUPP;
1042 } else if (!FLAGS_VALID(taprio_flags)) {
1043 NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1044 return -EINVAL;
1045 }
1046
1047 q->flags = taprio_flags;
1048 }
1049
1050 err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
1051 if (err < 0)
1052 return err;
1053
1054 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1055 if (!new_admin) {
1056 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1057 return -ENOMEM;
1058 }
1059 INIT_LIST_HEAD(&new_admin->entries);
1060
1061 rcu_read_lock();
1062 oper = rcu_dereference(q->oper_sched);
1063 admin = rcu_dereference(q->admin_sched);
1064 rcu_read_unlock();
1065
1066 if (mqprio && (oper || admin)) {
1067 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1068 err = -ENOTSUPP;
1069 goto free_sched;
1070 }
1071
1072 err = parse_taprio_schedule(tb, new_admin, extack);
1073 if (err < 0)
1074 goto free_sched;
1075
1076 if (new_admin->num_entries == 0) {
1077 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
1078 err = -EINVAL;
1079 goto free_sched;
1080 }
1081
1082 if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1083 clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
1084
1085 /* We only support static clockids and we don't allow
1086 * for it to be modified after the first init.
1087 */
1088 if (clockid < 0 ||
1089 (q->clockid != -1 && q->clockid != clockid)) {
1090 NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported");
1091 err = -ENOTSUPP;
1092 goto free_sched;
1093 }
1094
1095 q->clockid = clockid;
1096 }
1097
1098 if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1099 NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
1100 err = -EINVAL;
1101 goto free_sched;
1102 }
1103
1104 taprio_set_picos_per_byte(dev, q);
1105
1106 /* Protects against enqueue()/dequeue() */
1107 spin_lock_bh(qdisc_lock(sch));
1108
1109 if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
1110 if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
1111 NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
1112 err = -EINVAL;
1113 goto unlock;
1114 }
1115
1116 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
1117 }
1118
1119 if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
1120 !hrtimer_active(&q->advance_timer)) {
1121 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1122 q->advance_timer.function = advance_sched;
1123 }
1124
1125 if (mqprio) {
1126 netdev_set_num_tc(dev, mqprio->num_tc);
1127 for (i = 0; i < mqprio->num_tc; i++)
1128 netdev_set_tc_queue(dev, i,
1129 mqprio->count[i],
1130 mqprio->offset[i]);
1131
1132 /* Always use supplied priority mappings */
1133 for (i = 0; i < TC_BITMASK + 1; i++)
1134 netdev_set_prio_tc_map(dev, i,
1135 mqprio->prio_tc_map[i]);
1136 }
1137
1138 switch (q->clockid) {
1139 case CLOCK_REALTIME:
1140 q->tk_offset = TK_OFFS_REAL;
1141 break;
1142 case CLOCK_MONOTONIC:
1143 q->tk_offset = TK_OFFS_MAX;
1144 break;
1145 case CLOCK_BOOTTIME:
1146 q->tk_offset = TK_OFFS_BOOT;
1147 break;
1148 case CLOCK_TAI:
1149 q->tk_offset = TK_OFFS_TAI;
1150 break;
1151 default:
1152 NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
1153 err = -EINVAL;
1154 goto unlock;
1155 }
1156
1157 err = taprio_get_start_time(sch, new_admin, &start);
1158 if (err < 0) {
1159 NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
1160 goto unlock;
1161 }
1162
1163 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
1164 setup_txtime(q, new_admin, start);
1165
1166 if (!oper) {
1167 rcu_assign_pointer(q->oper_sched, new_admin);
1168 err = 0;
1169 new_admin = NULL;
1170 goto unlock;
1171 }
1172
1173 rcu_assign_pointer(q->admin_sched, new_admin);
1174 if (admin)
1175 call_rcu(&admin->rcu, taprio_free_sched_cb);
1176 } else {
1177 setup_first_close_time(q, new_admin, start);
1178
1179 /* Protects against advance_sched() */
1180 spin_lock_irqsave(&q->current_entry_lock, flags);
1181
1182 taprio_start_sched(sch, start, new_admin);
1183
1184 rcu_assign_pointer(q->admin_sched, new_admin);
1185 if (admin)
1186 call_rcu(&admin->rcu, taprio_free_sched_cb);
1187
1188 spin_unlock_irqrestore(&q->current_entry_lock, flags);
1189 }
1190
1191 new_admin = NULL;
1192 err = 0;
1193
1194unlock:
1195 spin_unlock_bh(qdisc_lock(sch));
1196
1197free_sched:
1198 if (new_admin)
1199 call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1200
1201 return err;
1202}
1203
1204static void taprio_destroy(struct Qdisc *sch)
1205{
1206 struct taprio_sched *q = qdisc_priv(sch);
1207 struct net_device *dev = qdisc_dev(sch);
1208 unsigned int i;
1209
1210 spin_lock(&taprio_list_lock);
1211 list_del(&q->taprio_list);
1212 spin_unlock(&taprio_list_lock);
1213
1214 hrtimer_cancel(&q->advance_timer);
1215
1216 if (q->qdiscs) {
1217 for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
1218 qdisc_put(q->qdiscs[i]);
1219
1220 kfree(q->qdiscs);
1221 }
1222 q->qdiscs = NULL;
1223
1224 netdev_set_num_tc(dev, 0);
1225
1226 if (q->oper_sched)
1227 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
1228
1229 if (q->admin_sched)
1230 call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
1231}
1232
1233static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1234 struct netlink_ext_ack *extack)
1235{
1236 struct taprio_sched *q = qdisc_priv(sch);
1237 struct net_device *dev = qdisc_dev(sch);
1238 int i;
1239
1240 spin_lock_init(&q->current_entry_lock);
1241
1242 hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
1243 q->advance_timer.function = advance_sched;
1244
1245 q->root = sch;
1246
1247 /* We only support static clockids. Use an invalid value as default
1248 * and get the valid one on taprio_change().
1249 */
1250 q->clockid = -1;
1251
1252 if (sch->parent != TC_H_ROOT)
1253 return -EOPNOTSUPP;
1254
1255 if (!netif_is_multiqueue(dev))
1256 return -EOPNOTSUPP;
1257
1258 /* pre-allocate qdisc, attachment can't fail */
1259 q->qdiscs = kcalloc(dev->num_tx_queues,
1260 sizeof(q->qdiscs[0]),
1261 GFP_KERNEL);
1262
1263 if (!q->qdiscs)
1264 return -ENOMEM;
1265
1266 if (!opt)
1267 return -EINVAL;
1268
1269 spin_lock(&taprio_list_lock);
1270 list_add(&q->taprio_list, &taprio_list);
1271 spin_unlock(&taprio_list_lock);
1272
1273 for (i = 0; i < dev->num_tx_queues; i++) {
1274 struct netdev_queue *dev_queue;
1275 struct Qdisc *qdisc;
1276
1277 dev_queue = netdev_get_tx_queue(dev, i);
1278 qdisc = qdisc_create_dflt(dev_queue,
1279 &pfifo_qdisc_ops,
1280 TC_H_MAKE(TC_H_MAJ(sch->handle),
1281 TC_H_MIN(i + 1)),
1282 extack);
1283 if (!qdisc)
1284 return -ENOMEM;
1285
1286 if (i < dev->real_num_tx_queues)
1287 qdisc_hash_add(qdisc, false);
1288
1289 q->qdiscs[i] = qdisc;
1290 }
1291
1292 return taprio_change(sch, opt, extack);
1293}
1294
1295static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
1296 unsigned long cl)
1297{
1298 struct net_device *dev = qdisc_dev(sch);
1299 unsigned long ntx = cl - 1;
1300
1301 if (ntx >= dev->num_tx_queues)
1302 return NULL;
1303
1304 return netdev_get_tx_queue(dev, ntx);
1305}
1306
1307static int taprio_graft(struct Qdisc *sch, unsigned long cl,
1308 struct Qdisc *new, struct Qdisc **old,
1309 struct netlink_ext_ack *extack)
1310{
1311 struct taprio_sched *q = qdisc_priv(sch);
1312 struct net_device *dev = qdisc_dev(sch);
1313 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1314
1315 if (!dev_queue)
1316 return -EINVAL;
1317
1318 if (dev->flags & IFF_UP)
1319 dev_deactivate(dev);
1320
1321 *old = q->qdiscs[cl - 1];
1322 q->qdiscs[cl - 1] = new;
1323
1324 if (new)
1325 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1326
1327 if (dev->flags & IFF_UP)
1328 dev_activate(dev);
1329
1330 return 0;
1331}
1332
1333static int dump_entry(struct sk_buff *msg,
1334 const struct sched_entry *entry)
1335{
1336 struct nlattr *item;
1337
1338 item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
1339 if (!item)
1340 return -ENOSPC;
1341
1342 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
1343 goto nla_put_failure;
1344
1345 if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
1346 goto nla_put_failure;
1347
1348 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
1349 entry->gate_mask))
1350 goto nla_put_failure;
1351
1352 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
1353 entry->interval))
1354 goto nla_put_failure;
1355
1356 return nla_nest_end(msg, item);
1357
1358nla_put_failure:
1359 nla_nest_cancel(msg, item);
1360 return -1;
1361}
1362
1363static int dump_schedule(struct sk_buff *msg,
1364 const struct sched_gate_list *root)
1365{
1366 struct nlattr *entry_list;
1367 struct sched_entry *entry;
1368
1369 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
1370 root->base_time, TCA_TAPRIO_PAD))
1371 return -1;
1372
1373 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
1374 root->cycle_time, TCA_TAPRIO_PAD))
1375 return -1;
1376
1377 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
1378 root->cycle_time_extension, TCA_TAPRIO_PAD))
1379 return -1;
1380
1381 entry_list = nla_nest_start_noflag(msg,
1382 TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
1383 if (!entry_list)
1384 goto error_nest;
1385
1386 list_for_each_entry(entry, &root->entries, list) {
1387 if (dump_entry(msg, entry) < 0)
1388 goto error_nest;
1389 }
1390
1391 nla_nest_end(msg, entry_list);
1392 return 0;
1393
1394error_nest:
1395 nla_nest_cancel(msg, entry_list);
1396 return -1;
1397}
1398
1399static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
1400{
1401 struct taprio_sched *q = qdisc_priv(sch);
1402 struct net_device *dev = qdisc_dev(sch);
1403 struct sched_gate_list *oper, *admin;
1404 struct tc_mqprio_qopt opt = { 0 };
1405 struct nlattr *nest, *sched_nest;
1406 unsigned int i;
1407
1408 rcu_read_lock();
1409 oper = rcu_dereference(q->oper_sched);
1410 admin = rcu_dereference(q->admin_sched);
1411
1412 opt.num_tc = netdev_get_num_tc(dev);
1413 memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
1414
1415 for (i = 0; i < netdev_get_num_tc(dev); i++) {
1416 opt.count[i] = dev->tc_to_txq[i].count;
1417 opt.offset[i] = dev->tc_to_txq[i].offset;
1418 }
1419
1420 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1421 if (!nest)
1422 goto start_error;
1423
1424 if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
1425 goto options_error;
1426
1427 if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
1428 goto options_error;
1429
1430 if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
1431 goto options_error;
1432
1433 if (q->txtime_delay &&
1434 nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
1435 goto options_error;
1436
1437 if (oper && dump_schedule(skb, oper))
1438 goto options_error;
1439
1440 if (!admin)
1441 goto done;
1442
1443 sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
1444 if (!sched_nest)
1445 goto options_error;
1446
1447 if (dump_schedule(skb, admin))
1448 goto admin_error;
1449
1450 nla_nest_end(skb, sched_nest);
1451
1452done:
1453 rcu_read_unlock();
1454
1455 return nla_nest_end(skb, nest);
1456
1457admin_error:
1458 nla_nest_cancel(skb, sched_nest);
1459
1460options_error:
1461 nla_nest_cancel(skb, nest);
1462
1463start_error:
1464 rcu_read_unlock();
1465 return -ENOSPC;
1466}
1467
1468static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
1469{
1470 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1471
1472 if (!dev_queue)
1473 return NULL;
1474
1475 return dev_queue->qdisc_sleeping;
1476}
1477
1478static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
1479{
1480 unsigned int ntx = TC_H_MIN(classid);
1481
1482 if (!taprio_queue_get(sch, ntx))
1483 return 0;
1484 return ntx;
1485}
1486
1487static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
1488 struct sk_buff *skb, struct tcmsg *tcm)
1489{
1490 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1491
1492 tcm->tcm_parent = TC_H_ROOT;
1493 tcm->tcm_handle |= TC_H_MIN(cl);
1494 tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
1495
1496 return 0;
1497}
1498
1499static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1500 struct gnet_dump *d)
1501 __releases(d->lock)
1502 __acquires(d->lock)
1503{
1504 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1505
1506 sch = dev_queue->qdisc_sleeping;
1507 if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
1508 qdisc_qstats_copy(d, sch) < 0)
1509 return -1;
1510 return 0;
1511}
1512
1513static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1514{
1515 struct net_device *dev = qdisc_dev(sch);
1516 unsigned long ntx;
1517
1518 if (arg->stop)
1519 return;
1520
1521 arg->count = arg->skip;
1522 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
1523 if (arg->fn(sch, ntx + 1, arg) < 0) {
1524 arg->stop = 1;
1525 break;
1526 }
1527 arg->count++;
1528 }
1529}
1530
1531static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
1532 struct tcmsg *tcm)
1533{
1534 return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
1535}
1536
1537static const struct Qdisc_class_ops taprio_class_ops = {
1538 .graft = taprio_graft,
1539 .leaf = taprio_leaf,
1540 .find = taprio_find,
1541 .walk = taprio_walk,
1542 .dump = taprio_dump_class,
1543 .dump_stats = taprio_dump_class_stats,
1544 .select_queue = taprio_select_queue,
1545};
1546
1547static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
1548 .cl_ops = &taprio_class_ops,
1549 .id = "taprio",
1550 .priv_size = sizeof(struct taprio_sched),
1551 .init = taprio_init,
1552 .change = taprio_change,
1553 .destroy = taprio_destroy,
1554 .peek = taprio_peek,
1555 .dequeue = taprio_dequeue,
1556 .enqueue = taprio_enqueue,
1557 .dump = taprio_dump,
1558 .owner = THIS_MODULE,
1559};
1560
1561static struct notifier_block taprio_device_notifier = {
1562 .notifier_call = taprio_dev_notifier,
1563};
1564
1565static int __init taprio_module_init(void)
1566{
1567 int err = register_netdevice_notifier(&taprio_device_notifier);
1568
1569 if (err)
1570 return err;
1571
5a781ccb
VCG
1572 return register_qdisc(&taprio_qdisc_ops);
1573}
1574
1575static void __exit taprio_module_exit(void)
1576{
1577 unregister_qdisc(&taprio_qdisc_ops);
1578 unregister_netdevice_notifier(&taprio_device_notifier);
1579}
1580
1581module_init(taprio_module_init);
1582module_exit(taprio_module_exit);
1583MODULE_LICENSE("GPL");