/*
 * tc_core.c		TC core library.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <math.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

#include "tc_core.h"
#include <linux/atm.h>

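/* Scaling factors filled in by tc_core_init(): tick_in_usec converts
 * internal time units to kernel ticks, clock_factor converts internal
 * time units to kernel clock units. Both default to 1 until
 * tc_core_init() has run.
 */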
static double tick_in_usec = 1;
static double clock_factor = 1;

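/* Return non-zero if 'time', converted to ticks, no longer fits in 32 bits. */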
int tc_core_time2big(unsigned int time)
{
	__u64 t = time;

	t *= tick_in_usec;
	return (t >> 32) != 0;
}

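/* Conversion helpers between internal time units and kernel ticks
 * (time2tick/tick2time), and between internal time units and kernel
 * clock units (time2ktime/ktime2time).
 */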
unsigned int tc_core_time2tick(unsigned int time)
{
	return time*tick_in_usec;
}

unsigned int tc_core_tick2time(unsigned int tick)
{
	return tick/tick_in_usec;
}

unsigned int tc_core_time2ktime(unsigned int time)
{
	return time * clock_factor;
}

unsigned int tc_core_ktime2time(unsigned int ktime)
{
	return ktime / clock_factor;
}

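/* tc_calc_xmittime(): time, in ticks, needed to transmit 'size' bytes at
 * 'rate' bytes per second. tc_calc_xmitsize(): the inverse, i.e. how many
 * bytes can be transmitted in 'ticks' ticks at 'rate'.
 */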
unsigned int tc_calc_xmittime(__u64 rate, unsigned int size)
{
	return tc_core_time2tick(TIME_UNITS_PER_SEC*((double)size/(double)rate));
}

unsigned int tc_calc_xmitsize(__u64 rate, unsigned int ticks)
{
	return ((double)rate*tc_core_tick2time(ticks))/TIME_UNITS_PER_SEC;
}

/*
 * Aligning to ATM cells is used for determining the (ATM) SAR
 * alignment overhead at the ATM layer (SAR = Segmentation And
 * Reassembly). This is for example needed when scheduling packets on
 * an ADSL connection. Note that the extra ATM-AAL overhead is _not_
 * included in this calculation. This overhead is added in the kernel
 * before doing the rate table lookup, as this gives better precision
 * (as the table will always be aligned to 48 bytes).
 * --Hawk, d.7/11-2004. <hawk@diku.dk>
 */
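/* Example: a 100 byte packet needs 100 / 48 = 3 cells (rounded up) and
 * therefore occupies 3 * ATM_CELL_SIZE = 3 * 53 = 159 bytes on the wire.
 */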
static unsigned int tc_align_to_atm(unsigned int size)
{
	int linksize, cells;

	cells = size / ATM_CELL_PAYLOAD;
	if ((size % ATM_CELL_PAYLOAD) > 0)
		cells++;

	linksize = cells * ATM_CELL_SIZE; /* Use full cell size to add ATM tax */
	return linksize;
}

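/* Clamp 'sz' to the minimum packet unit (mpu) and apply the link-layer
 * specific size adjustment (currently only ATM cell alignment).
 */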
static unsigned int tc_adjust_size(unsigned int sz, unsigned int mpu, enum link_layer linklayer)
{
	if (sz < mpu)
		sz = mpu;

	switch (linklayer) {
	case LINKLAYER_ATM:
		return tc_align_to_atm(sz);
	case LINKLAYER_ETHERNET:
	default:
		/* No size adjustments on Ethernet */
		return sz;
	}
}

/* Note: the rate table calculated here has been replaced in the
 * kernel and is no longer used for lookups.
 *
 * This happened in kernel release v3.8 with
 * - commit 56b765b79 ("htb: improved accuracy at high rates"),
 * which unfortunately broke the tc overhead and linklayer
 * parameters.
 *
 * Kernel overhead handling was fixed in kernel v3.10 by
 * - commit 01cb71d2d47 (net_sched: restore "overhead xxx" handling)
 *
 * Kernel linklayer handling was fixed in kernel v3.11 by
 * - commit 8a8e3d84b17 (net_sched: restore "linklayer atm" handling)
 */

/*
   rtab[pkt_len>>cell_log] = pkt_xmit_time
 */

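/* Build the 256-entry rate table: entry i holds the transmit time of a
 * packet of size (i + 1) << cell_log after mpu/link-layer adjustment.
 * If cell_log is negative, it is chosen so that the mtu fits into the
 * 256 slots. Returns the cell_log actually used.
 */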
int tc_calc_rtable(struct tc_ratespec *r, __u32 *rtab,
		   int cell_log, unsigned int mtu,
		   enum link_layer linklayer)
{
	int i;
	unsigned int sz;
	unsigned int bps = r->rate;
	unsigned int mpu = r->mpu;

	if (mtu == 0)
		mtu = 2047;

	if (cell_log < 0) {
		cell_log = 0;
		while ((mtu >> cell_log) > 255)
			cell_log++;
	}

	for (i = 0; i < 256; i++) {
		sz = tc_adjust_size((i + 1) << cell_log, mpu, linklayer);
		rtab[i] = tc_calc_xmittime(bps, sz);
	}

	r->cell_align = -1;
	r->cell_log = cell_log;
	r->linklayer = (linklayer & TC_LINKLAYER_MASK);
	return cell_log;
}

/*
   stab[pkt_len>>cell_log] = pkt_xmit_size>>size_log
 */

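/* Build the size table: entry i maps a packet of length
 * (i + 1) << cell_log to its adjusted size, shifted right by size_log so
 * that every entry fits into 16 bits. *stab is malloc()ed here and must
 * be freed by the caller; returns 0 on success, -1 on allocation failure.
 * When only overhead is set (Ethernet, mpu == 0), no table is needed and
 * *stab is left NULL.
 */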
int tc_calc_size_table(struct tc_sizespec *s, __u16 **stab)
{
	int i;
	enum link_layer linklayer = s->linklayer;
	unsigned int sz;

	if (linklayer <= LINKLAYER_ETHERNET && s->mpu == 0) {
		/* don't need data table in this case (only overhead set) */
		s->mtu = 0;
		s->tsize = 0;
		s->cell_log = 0;
		s->cell_align = 0;
		*stab = NULL;
		return 0;
	}

	if (s->mtu == 0)
		s->mtu = 2047;
	if (s->tsize == 0)
		s->tsize = 512;

	s->cell_log = 0;
	while ((s->mtu >> s->cell_log) > s->tsize - 1)
		s->cell_log++;

	*stab = malloc(s->tsize * sizeof(__u16));
	if (!*stab)
		return -1;

again:
	for (i = s->tsize - 1; i >= 0; i--) {
		sz = tc_adjust_size((i + 1) << s->cell_log, s->mpu, linklayer);
		if ((sz >> s->size_log) > UINT16_MAX) {
			s->size_log++;
			goto again;
		}
		(*stab)[i] = sz >> s->size_log;
	}

	s->cell_align = -1; /* Due to the sz calc */
	return 0;
}

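/* Read the timer parameters the kernel exports in /proc/net/psched and
 * derive tick_in_usec and clock_factor from them. Returns 0 on success,
 * -1 if the file cannot be read or parsed.
 */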
int tc_core_init(void)
{
	FILE *fp;
	__u32 clock_res;
	__u32 t2us;
	__u32 us2t;

	fp = fopen("/proc/net/psched", "r");
	if (fp == NULL)
		return -1;

	if (fscanf(fp, "%08x%08x%08x", &t2us, &us2t, &clock_res) != 3) {
		fclose(fp);
		return -1;
	}
	fclose(fp);

	/* compatibility hack: for old iproute binaries (which ignore
	 * the kernel clock resolution) the kernel advertises a
	 * tick multiplier of 1000 in case of nanosecond resolution,
	 * which really is 1. */
	if (clock_res == 1000000000)
		t2us = us2t;

	clock_factor = (double)clock_res / TIME_UNITS_PER_SEC;
	tick_in_usec = (double)t2us / us2t * clock_factor;
	return 0;
}