#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t
{
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

struct Qdisc
{
	int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *(*dequeue)(struct Qdisc *dev);
	unsigned flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int padded;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;
	u32 handle;
	u32 parent;
	atomic_t refcnt;
	unsigned long state;
	struct sk_buff *gso_skb;
	struct sk_buff_head q;
	struct netdev_queue *dev_queue;
	struct Qdisc *next_sched;
	struct list_head list;

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	int (*reshape_fail)(struct sk_buff *skb,
			    struct Qdisc *q);

	void *u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc *__parent;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **);
	struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*get)(struct Qdisc *, u32 classid);
	void (*put)(struct Qdisc *, unsigned long);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;

	int (*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *(*dequeue)(struct Qdisc *);
	int (*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int (*drop)(struct Qdisc *);

	int (*init)(struct Qdisc *, struct nlattr *arg);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *, struct nlattr *arg);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module *owner;
};
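
/*
 * Illustrative sketch only, not part of this header: a minimal classless
 * qdisc could wire up struct Qdisc_ops roughly as below, reusing the queue
 * helpers defined later in this file.  The "example_fifo" name is
 * hypothetical, and this variant does no limit checking at all; a real
 * qdisc would normally supply its own enqueue with a queue-length bound
 * and register the ops via register_qdisc() from net/sched/sch_api.c.
 *
 *	static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
 *		.id		= "example_fifo",
 *		.priv_size	= 0,
 *		.enqueue	= qdisc_enqueue_tail,
 *		.dequeue	= qdisc_dequeue_head,
 *		.requeue	= qdisc_requeue,
 *		.drop		= qdisc_queue_drop,
 *		.owner		= THIS_MODULE,
 *	};
 */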

struct tcf_result
{
	unsigned long class;
	u32 classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops *next;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *, struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	void (*destroy)(struct tcf_proto *);

	unsigned long (*get)(struct tcf_proto *, u32 handle);
	void (*put)(struct tcf_proto *, unsigned long);
	int (*change)(struct tcf_proto *, unsigned long,
		      u32 handle, struct nlattr **,
		      unsigned long *);
	int (*delete)(struct tcf_proto *, unsigned long);
	void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int (*dump)(struct tcf_proto *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);

	struct module *owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto *next;
	void *root;
	int (*classify)(struct sk_buff *, struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	u32 classid;
	struct Qdisc *q;
	void *data;
	struct tcf_proto_ops *ops;
};

struct qdisc_skb_cb {
	unsigned int pkt_len;
	char data[];
};

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
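
/*
 * Example (illustrative only): a qdisc's ->change() handler, which already
 * runs under the RTNL semaphore, typically serializes against the packet
 * processing paths like this while it updates its private state.  'sch' is
 * the Qdisc being changed; 'q', 'limit' and 'new_limit' are hypothetical
 * private-state names.
 *
 *	sch_tree_lock(sch);
 *	q->limit = new_limit;
 *	sch_tree_unlock(sch);
 */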

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

struct Qdisc_class_common
{
	u32 classid;
	struct hlist_node hnode;
};

struct Qdisc_class_hash
{
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
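
/*
 * Usage sketch (illustrative, not part of this header): a classful qdisc
 * typically embeds struct Qdisc_class_common at the start of its per-class
 * state and resolves class ids with qdisc_class_find() plus container_of().
 * The example_* names are hypothetical; qdisc_priv() comes from
 * net/pkt_sched.h.
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		// scheduler-specific per-class fields follow
 *	};
 *
 *	static struct example_class *example_find(u32 classid, struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		if (clc == NULL)
 *			return NULL;
 *		return container_of(clc, struct example_class, common);
 *	}
 */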

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device. */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
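
/*
 * Illustrative use (not part of this header): a classful parent qdisc
 * usually funnels a child's enqueue return value through
 * net_xmit_drop_count() so that packets stolen by actions are not
 * double-counted as drops.  'child' and 'ret' are hypothetical names.
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 */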

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

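/* qdisc_enqueue_root() is meant for the root of a device TX queue: it
 * stamps the true skb->len into the qdisc cb area before the (possibly
 * size-table adjusted) enqueue, and masks the result with NET_XMIT_MASK
 * so the __NET_XMIT_* flags above do not leak back to the core stack.
 */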
static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
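
/*
 * Worked example (illustrative): with rate.cell_log = 3 (8-byte cells) and
 * zero cell_align/overhead, a 1000-byte packet maps to slot 1000 >> 3 = 125,
 * so qdisc_l2t() returns rtab->data[125], the pre-computed transmission time
 * for that size.  Packets longer than 255 cells are approximated by scaling
 * the last table entry by the high bits of the slot and adding the entry for
 * the low 8 bits.
 */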

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif