]> git.proxmox.com Git - ceph.git/blame - ceph/src/dpdk/lib/librte_sched/rte_sched.h
bump version to 12.2.12-pve1
[ceph.git] / ceph / src / dpdk / lib / librte_sched / rte_sched.h
CommitLineData
7c673cae
FG
1/*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#ifndef __INCLUDE_RTE_SCHED_H__
35#define __INCLUDE_RTE_SCHED_H__
36
37#ifdef __cplusplus
38extern "C" {
39#endif
40
41/**
42 * @file
43 * RTE Hierarchical Scheduler
44 *
45 * The hierarchical scheduler prioritizes the transmission of packets
46 * from different users and traffic classes according to the Service
47 * Level Agreements (SLAs) defined for the current network node.
48 *
49 * The scheduler supports thousands of packet queues grouped under a
50 * 5-level hierarchy:
51 * 1. Port:
52 * - Typical usage: output Ethernet port;
53 * - Multiple ports are scheduled in round robin order with
54 * equal priority;
55 * 2. Subport:
56 * - Typical usage: group of users;
57 * - Traffic shaping using the token bucket algorithm
58 * (one bucket per subport);
59 * - Upper limit enforced per traffic class at subport level;
60 * - Lower priority traffic classes able to reuse subport
61 * bandwidth currently unused by higher priority traffic
62 * classes of the same subport;
 * - When any subport traffic class is oversubscribed
 * (configuration time event), the usage of subport member
 * pipes with high demand for that traffic class is
 * truncated to a dynamically adjusted value with no
 * impact on low demand pipes;
68 * 3. Pipe:
69 * - Typical usage: individual user/subscriber;
70 * - Traffic shaping using the token bucket algorithm
71 * (one bucket per pipe);
72 * 4. Traffic class:
73 * - Traffic classes of the same pipe handled in strict
74 * priority order;
75 * - Upper limit enforced per traffic class at the pipe level;
76 * - Lower priority traffic classes able to reuse pipe
77 * bandwidth currently unused by higher priority traffic
78 * classes of the same pipe;
79 * 5. Queue:
80 * - Typical usage: queue hosting packets from one or
81 * multiple connections of same traffic class belonging to
82 * the same user;
83 * - Weighted Round Robin (WRR) is used to service the
84 * queues within same pipe traffic class.
85 *
86 */
87
88#include <sys/types.h>
89#include <rte_mbuf.h>
90#include <rte_meter.h>
91
92/** Random Early Detection (RED) */
93#ifdef RTE_SCHED_RED
94#include "rte_red.h"
95#endif
96
/** Number of traffic classes per pipe (as well as subport).
 * Fixed by the implementation; cannot be changed.
 */
#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE 4

/** Number of queues per pipe traffic class. Cannot be changed. */
#define RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS 4

/** Number of queues per pipe (derived: traffic classes x queues per class). */
#define RTE_SCHED_QUEUES_PER_PIPE \
	(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * \
	RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)

/** Maximum number of pipe profiles that can be defined per port.
 * Compile-time configurable (override by defining it before this header
 * is included or on the compiler command line).
 */
#ifndef RTE_SCHED_PIPE_PROFILES_PER_PORT
#define RTE_SCHED_PIPE_PROFILES_PER_PORT 256
#endif

/*
 * Ethernet framing overhead. Overhead fields per Ethernet frame:
 * 1. Preamble: 7 bytes;
 * 2. Start of Frame Delimiter (SFD): 1 byte;
 * 3. Frame Check Sequence (FCS): 4 bytes;
 * 4. Inter Frame Gap (IFG): 12 bytes.
 *
 * The FCS is considered overhead only if not included in the packet
 * length (field pkt_len of struct rte_mbuf).
 * Default of 24 = 7 (preamble) + 1 (SFD) + 4 (FCS) + 12 (IFG).
 */
#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
#endif
130
/*
 * Subport configuration parameters. The period and credits_per_period
 * parameters are measured in bytes, with one byte meaning the time
 * duration associated with the transmission of one byte on the
 * physical medium of the output port, with pipe or pipe traffic class
 * rate (measured as percentage of output port rate) determined as
 * credits_per_period divided by period. One credit represents one
 * byte.
 */
struct rte_sched_subport_params {
	/* Subport token bucket (shapes the aggregate subport traffic) */
	uint32_t tb_rate; /**< Token bucket rate (measured in bytes per second) */
	uint32_t tb_size; /**< Token bucket size (measured in credits) */

	/* Subport traffic classes (one rate per traffic class) */
	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Traffic class rates (measured in bytes per second) */
	uint32_t tc_period;
	/**< Enforcement period for the traffic class rates
	 * (measured in milliseconds) */
};
151
/** Subport statistics. All counters are indexed by traffic class. */
struct rte_sched_subport_stats {
	/* Packets */
	uint32_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Number of packets successfully written, per traffic class */
	uint32_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Number of packets dropped, per traffic class */

	/* Bytes */
	uint32_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Number of bytes successfully written for each traffic class */
	uint32_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Number of bytes dropped for each traffic class */

#ifdef RTE_SCHED_RED
	uint32_t n_pkts_red_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Number of packets dropped by RED, per traffic class
	 * (only present when compiled with RTE_SCHED_RED) */
#endif
};
171
/*
 * Pipe configuration parameters. The period and credits_per_period
 * parameters are measured in bytes, with one byte meaning the time
 * duration associated with the transmission of one byte on the
 * physical medium of the output port, with pipe or pipe traffic class
 * rate (measured as percentage of output port rate) determined as
 * credits_per_period divided by period. One credit represents one
 * byte.
 */
struct rte_sched_pipe_params {
	/* Pipe token bucket (shapes the aggregate pipe traffic) */
	uint32_t tb_rate; /**< Token bucket rate (measured in bytes per second) */
	uint32_t tb_size; /**< Token bucket size (measured in credits) */

	/* Pipe traffic classes (one rate per traffic class) */
	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Traffic class rates (measured in bytes per second) */
	uint32_t tc_period;
	/**< Enforcement period for the traffic class rates
	 * (measured in milliseconds) */
#ifdef RTE_SCHED_SUBPORT_TC_OV
	uint8_t tc_ov_weight; /**< Weight for traffic class 3 oversubscription
	 * (only present when compiled with RTE_SCHED_SUBPORT_TC_OV) */
#endif

	/* Pipe queues: one WRR weight per queue within the pipe */
	uint8_t wrr_weights[RTE_SCHED_QUEUES_PER_PIPE]; /**< WRR weights */
};
198
/** Queue statistics (per individual packet queue) */
struct rte_sched_queue_stats {
	/* Packets */
	uint32_t n_pkts; /**< Packets successfully written */
	uint32_t n_pkts_dropped; /**< Packets dropped */
#ifdef RTE_SCHED_RED
	uint32_t n_pkts_red_dropped; /**< Packets dropped by RED
	 * (only present when compiled with RTE_SCHED_RED) */
#endif

	/* Bytes */
	uint32_t n_bytes; /**< Bytes successfully written */
	uint32_t n_bytes_dropped; /**< Bytes dropped */
};
212
/** Port configuration parameters, consumed by rte_sched_port_config()
 * and rte_sched_port_get_memory_footprint(). */
struct rte_sched_port_params {
	const char *name; /**< String to be associated with this port */
	int socket; /**< CPU socket ID (for NUMA-aware memory allocation) */
	uint32_t rate; /**< Output port rate
	 * (measured in bytes per second) */
	uint32_t mtu; /**< Maximum Ethernet frame size
	 * (measured in bytes).
	 * Should not include the framing overhead. */
	uint32_t frame_overhead; /**< Framing overhead per packet
	 * (measured in bytes); see RTE_SCHED_FRAME_OVERHEAD_DEFAULT */
	uint32_t n_subports_per_port; /**< Number of subports */
	uint32_t n_pipes_per_subport; /**< Number of pipes per subport */
	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
	/**< Packet queue size for each traffic class.
	 * All queues within the same pipe traffic class have the same
	 * size. Queues from different pipes serving the same traffic
	 * class have the same size. */
	struct rte_sched_pipe_params *pipe_profiles;
	/**< Pipe profile table.
	 * Every pipe is configured using one of the profiles from this table. */
	uint32_t n_pipe_profiles; /**< Number of profiles in the pipe profile
	 * table (at most RTE_SCHED_PIPE_PROFILES_PER_PORT) */
#ifdef RTE_SCHED_RED
	struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS]; /**< RED parameters,
	 * per traffic class and per meter color
	 * (only present when compiled with RTE_SCHED_RED) */
#endif
};
239
240/*
241 * Configuration
242 *
243 ***/
244
/**
 * Hierarchical scheduler port configuration. Allocates and initializes
 * a new port scheduler instance from the given parameters.
 *
 * @param params
 *   Port scheduler configuration parameter structure
 * @return
 *   Handle to port scheduler instance upon success or NULL otherwise.
 *   The returned handle is owned by the caller and must be released
 *   with rte_sched_port_free().
 */
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params);
255
/**
 * Hierarchical scheduler port free. Releases a port scheduler instance
 * previously created with rte_sched_port_config().
 *
 * @param port
 *   Handle to port scheduler instance
 */
void
rte_sched_port_free(struct rte_sched_port *port);
264
/**
 * Hierarchical scheduler subport configuration
 *
 * @param port
 *   Handle to port scheduler instance
 * @param subport_id
 *   Subport ID (must be less than n_subports_per_port of the port)
 * @param params
 *   Subport configuration parameters
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_subport_config(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_params *params);
281
/**
 * Hierarchical scheduler pipe configuration. Binds a pipe to one of the
 * pipe profiles defined at port configuration time.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param subport_id
 *   Subport ID
 * @param pipe_id
 *   Pipe ID within subport
 * @param pipe_profile
 *   ID of port-level pre-configured pipe profile
 *   (index into the port's pipe_profiles table)
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_pipe_config(struct rte_sched_port *port,
	uint32_t subport_id,
	uint32_t pipe_id,
	int32_t pipe_profile);
301
/**
 * Hierarchical scheduler memory footprint size per port. Useful for
 * sizing memory before calling rte_sched_port_config().
 *
 * @param params
 *   Port scheduler configuration parameter structure
 * @return
 *   Memory footprint size in bytes upon success, 0 otherwise
 *   (0 indicates invalid parameters)
 */
uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params);
312
313/*
314 * Statistics
315 *
316 ***/
317
/**
 * Hierarchical scheduler subport statistics read
 *
 * @param port
 *   Handle to port scheduler instance
 * @param subport_id
 *   Subport ID
 * @param stats
 *   Pointer to pre-allocated subport statistics structure where the statistics
 *   counters should be stored
 * @param tc_ov
 *   Pointer to pre-allocated 4-entry array where the oversubscription status
 *   for each of the 4 subport traffic classes should be stored
 *   (the array must hold RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE entries)
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_subport_read_stats(struct rte_sched_port *port,
	uint32_t subport_id,
	struct rte_sched_subport_stats *stats,
	uint32_t *tc_ov);
339
/**
 * Hierarchical scheduler queue statistics read
 *
 * @param port
 *   Handle to port scheduler instance
 * @param queue_id
 *   Queue ID within port scheduler
 * @param stats
 *   Pointer to pre-allocated queue statistics structure where the statistics
 *   counters should be stored
 * @param qlen
 *   Pointer to pre-allocated variable where the current queue length
 *   should be stored
 * @return
 *   0 upon success, error code otherwise
 */
int
rte_sched_queue_read_stats(struct rte_sched_port *port,
	uint32_t queue_id,
	struct rte_sched_queue_stats *stats,
	uint16_t *qlen);
361
/**
 * Scheduler hierarchy path write to packet descriptor. Typically
 * called by the packet classification stage, before enqueuing the
 * packet with rte_sched_port_enqueue().
 *
 * @param pkt
 *   Packet descriptor handle
 * @param subport
 *   Subport ID
 * @param pipe
 *   Pipe ID within subport
 * @param traffic_class
 *   Traffic class ID within pipe (0 .. 3)
 * @param queue
 *   Queue ID within pipe traffic class (0 .. 3)
 * @param color
 *   Packet color to store in the packet descriptor
 */
void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
	uint32_t subport, uint32_t pipe, uint32_t traffic_class,
	uint32_t queue, enum rte_meter_color color);
383
/**
 * Scheduler hierarchy path read from packet descriptor (struct
 * rte_mbuf). Typically called as part of the hierarchical scheduler
 * enqueue operation. The subport, pipe, traffic class and queue
 * parameters need to be pre-allocated by the caller.
 *
 * @param pkt
 *   Packet descriptor handle
 * @param subport
 *   Subport ID
 * @param pipe
 *   Pipe ID within subport
 * @param traffic_class
 *   Traffic class ID within pipe (0 .. 3)
 * @param queue
 *   Queue ID within pipe traffic class (0 .. 3)
 *
 */
void
rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
	uint32_t *subport, uint32_t *pipe,
	uint32_t *traffic_class, uint32_t *queue);

/**
 * Packet color read from packet descriptor, as previously stored by
 * rte_sched_port_pkt_write() (the counterpart of the @p color argument
 * of that function).
 *
 * @param pkt
 *   Packet descriptor handle
 * @return
 *   Color of the packet
 */
enum rte_meter_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt);
409
/**
 * Hierarchical scheduler port enqueue. Writes up to n_pkts to port
 * scheduler and returns the number of packets actually written. For
 * each packet, the port scheduler queue to write the packet to is
 * identified by reading the hierarchy path from the packet
 * descriptor; if the queue is full or congested and the packet is not
 * written to the queue, then the packet is automatically dropped
 * without any action required from the caller.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkts
 *   Array storing the packet descriptor handles
 * @param n_pkts
 *   Number of packets to enqueue from the pkts array into the port scheduler
 * @return
 *   Number of packets successfully enqueued
 */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
430
/**
 * Hierarchical scheduler port dequeue. Reads up to n_pkts from the
 * port scheduler and stores them in the pkts array and returns the
 * number of packets actually read. The pkts array needs to be
 * pre-allocated by the caller with at least n_pkts entries.
 *
 * @param port
 *   Handle to port scheduler instance
 * @param pkts
 *   Pre-allocated packet descriptor array where the packets dequeued
 *   from the port scheduler should be stored
 * @param n_pkts
 *   Number of packets to dequeue from the port scheduler
 * @return
 *   Number of packets successfully dequeued and placed in the pkts array
 */
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
450
451#ifdef __cplusplus
452}
453#endif
454
455#endif /* __INCLUDE_RTE_SCHED_H__ */