// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets.
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>


static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;

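/* Look up the requested xt target by name/revision and validate the
 * target blob via xt_check_target().  On failure, drop the module
 * reference taken by xt_request_find_target().
 */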
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.net       = net;
	par.table     = table;
	par.entryinfo = &e;
	par.target    = target;
	par.targinfo  = t->data;
	par.hook_mask = hook;
	par.family    = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

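/* Undo ipt_init_target(): run the target's ->destroy() hook, if any,
 * and release the module reference.
 */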
static void ipt_destroy_target(struct xt_entry_target *t)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
	};
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

static void tcf_ipt_release(struct tc_action *a)
{
	struct tcf_ipt *ipt = to_ipt(a);

	if (ipt->tcfi_t) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_t);
	}
	kfree(ipt->tcfi_tname);
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};

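/* Common init path for the "ipt" and "xt" actions: parse the netlink
 * attributes, create or look up the action instance in the IDR, copy the
 * table name and target blob, and bind the xt target via ipt_init_target().
 */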
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind,
			  struct tcf_proto *tp)
{
	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind,
				     false);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* don't override defaults */
			return 0;

		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t     = t;
	ipt->tcfi_hook  = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind, bool rtnl_held, struct tcf_proto *tp,
			struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
			      bind, tp);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind, bool unlocked, struct tcf_proto *tp,
		       struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
			      bind, tp);
}

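/* Per-packet action: hand the skb to the configured xt target and map the
 * netfilter verdict (NF_ACCEPT/NF_DROP/XT_CONTINUE) to a TC action code.
 */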
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net	= dev_net(skb->dev),
		.in	= skb->dev,
		.hook	= ipt->tcfi_hook,
		.pf	= NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.state    = &state;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}

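/* Dump the action configuration back to user space.  A copy of the target
 * is made so the kernel-side target name can be written into the
 * user-visible name field without touching the live entry.
 */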
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* For simple targets the kernel size equals the user size and the
	 * user name equals the target name; to be foolproof, do not rely
	 * on this.
	 */

	spin_lock_bh(&ipt->tcf_lock);
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ipt_ops = {
	.kind		= "ipt",
	.id		= TCA_ID_IPT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_ipt_init,
	.walk		= tcf_ipt_walker,
	.lookup		= tcf_ipt_search,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ipt_net_id);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};

static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_xt_ops = {
	.kind		= "xt",
	.id		= TCA_ID_XT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_xt_init,
	.walk		= tcf_xt_walker,
	.lookup		= tcf_xt_search,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, xt_net_id);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

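/* Register both action ops.  Module load only fails when neither the "xt"
 * nor the "ipt" action could be registered.
 */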
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0)
		return ret1;

	return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);