// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
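
/* taprio runs in one of three modes, selected through TCA_TAPRIO_ATTR_FLAGS:
 *
 *  - pure software: gates are driven by a hrtimer (advance_sched()) and
 *    enforced at dequeue time (taprio_dequeue_soft()/taprio_peek_soft());
 *  - txtime assist (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST): enqueue() stamps
 *    each skb with a transmission time derived from the schedule
 *    (get_packet_txtime()) and leaves the actual time-based gating to
 *    whatever sits below taprio;
 *  - full offload (TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD): the whole gate
 *    control list is handed to the driver through ndo_setup_tc().
 */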
struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};
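
/* Schedule lifecycle: a newly configured schedule is installed as the
 * "admin" schedule (admin_sched) while any currently running one stays
 * "oper" (oper_sched). Once the admin schedule's base_time is reached,
 * switch_schedules() promotes admin to oper under RCU and the old oper
 * schedule is freed by taprio_free_sched_cb() after a grace period.
 */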
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	switch (q->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, q->tk_offset);
	}

	return KTIME_MAX;
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	if (!sched)
		return;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has been already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}
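
/* Example: at 1Gbps picos_per_byte is 8000 (8ns per byte), so a 1500-byte
 * frame gives length_to_duration() = 1500 * 8000 / 1000 = 12000ns (12us)
 * of wire time.
 */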
/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;

	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;

	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet can
 *       be transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}
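
/* In txtime-assist mode the tstamp computed above is only a hint: the
 * actual time-based transmission is expected to be carried out by whatever
 * sits below taprio, typically a per-queue etf qdisc (often with hardware
 * launch-time offload) that consumes skb->tstamp.
 */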
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}

static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}
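
/* The budget is the number of bytes that fit in this entry's interval:
 * interval[ns] * 1000 / picos_per_byte. E.g. a 300us interval at 1Gbps
 * (8000 picoseconds per byte) allows 300000 * 1000 / 8000 = 37500 bytes.
 */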
static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time))
			continue;

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0)
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	return skb;
}

static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules are pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}
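
/* In software mode advance_sched() is re-armed (HRTIMER_RESTART) for each
 * entry's close_time, so every gate change costs one hrtimer expiry plus a
 * __netif_schedule() that kicks the transmit path under the new gate_mask.
 */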
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]		     = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]	     = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]	     = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]	     = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]		     = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]	     = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
};
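
/* These attributes correspond to the tc(8) command line, roughly:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 *
 * where each "sched-entry <cmd> <gate mask> <interval>" becomes one
 * sched_entry parsed against entry_policy above.
 */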
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}

static int parse_sched_list(struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		new->cycle_time = cycle;
	}

	return 0;
}
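
/* When TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME is not given, the cycle time
 * defaults to the sum of all entry intervals, e.g. three entries of
 * 300us + 300us + 400us give a 1ms cycle.
 */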
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range being equal to the
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}
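
/* Example: with base_time B already in the past and now = B + 2.5 cycles,
 * n = 2 and *start = B + 3 * cycle_time, i.e. the schedule is (re)started
 * at the beginning of the next full cycle, never in the middle of one.
 */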
static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
				   speed * 1000 * 1000);

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}
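
/* picos_per_byte = 10^12 * 8 / link_speed_in_bps, e.g. 8000 at 1Gbps and
 * 800 at 10Gbps (hence the picosecond resolution). SPEED_10 is used as a
 * conservative fallback when the link speed is unknown.
 */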
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}
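
/* For txtime assist, each entry's first next_txtime is simply base_time
 * plus the sum of the intervals of all the entries before it, i.e. the
 * offset of that entry's window inside the first cycle.
 */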
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries +
		      sizeof(struct __tc_taprio_qopt_offload);
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(size, GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* This function only serves to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so that when dump() is
 * called the user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}
static void taprio_sched_to_offload(struct taprio_sched *q,
				    struct sched_gate_list *sched,
				    const struct tc_mqprio_qopt *mqprio,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = entry->gate_mask;

		i++;
	}

	offload->num_entries = i;
}

static int taprio_enable_offload(struct net_device *dev,
				 struct tc_mqprio_qopt *mqprio,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(q, sched, mqprio, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

	taprio_offload_config_changed(q);

done:
	taprio_offload_free(offload);

	return err;
}

static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

out:
	taprio_offload_free(offload);

	return err;
}
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	u32 taprio_flags = 0;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
		taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);

		if (q->flags != 0 && q->flags != taprio_flags) {
			NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
			return -EOPNOTSUPP;
		} else if (!taprio_flags_valid(taprio_flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
			return -EINVAL;
		}

		q->flags = taprio_flags;
	}

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
		err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(taprio_flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i < TC_BITMASK + 1; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
		setup_txtime(q, new_admin, start);

		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_set_num_tc(dev, 0);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);
	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;

	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};
static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};
static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");