/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */
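
/*
 * Illustrative usage (assumes iproute2's "action ipt" syntax, which passes
 * iptables target options through to this module; shown as an example only,
 * not taken from this file):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *		match ip dst 192.0.2.1 flowid 1:1 \
 *		action ipt -j MARK --set-mark 2
 */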

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>


static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;

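/*
 * Resolve the requested IPv4 xt target and validate its private data
 * (t->data) against the given table and hook mask via xt_check_target().
 * On success the module reference taken by xt_request_find_target() is
 * kept; it is dropped again in ipt_destroy_target().
 */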
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.net = net;
	par.table = table;
	par.entryinfo = &e;
	par.target = target;
	par.targinfo = t->data;
	par.hook_mask = hook;
	par.family = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

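/*
 * Undo ipt_init_target(): run the target's destructor, if it has one, and
 * drop the module reference taken when the target was looked up.
 */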
static void ipt_destroy_target(struct xt_entry_target *t)
{
	struct xt_tgdtor_param par = {
		.target = t->u.kernel.target,
		.targinfo = t->data,
		.family = NFPROTO_IPV4,
	};
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

static void tcf_ipt_release(struct tc_action *a, int bind)
{
	struct tcf_ipt *ipt = to_ipt(a);

	if (ipt->tcfi_t) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_t);
	}
	kfree(ipt->tcfi_tname);
}

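/* Netlink attribute policy shared by the "ipt" and "xt" actions. */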
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};

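/*
 * Common setup for the "ipt" and "xt" actions: parse the netlink attributes,
 * create a new action instance or look up an existing one in the per-netns
 * IDR, and (re)initialise the xt target it carries.
 */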
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	exists = tcf_idr_check(tn, index, a, bind);
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind,
				     false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* don't override defaults */
			return 0;
		tcf_idr_release(*a, bind);

		if (!ovr)
			return -EEXIST;
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t = t;
	ipt->tcfi_hook = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	if (ret == ACT_P_CREATED)
		tcf_idr_release(*a, bind);
	return err;
}

static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind)
{
	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
			      bind);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind)
{
	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
			      bind);
}

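/*
 * Run the configured xt target on the packet and map the netfilter verdict
 * to a TC action code: NF_ACCEPT -> TC_ACT_OK, NF_DROP -> TC_ACT_SHOT,
 * XT_CONTINUE -> TC_ACT_PIPE.  Any other verdict is reported and treated
 * as ACCEPT.
 */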
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net = dev_net(skb->dev),
		.in = skb->dev,
		.hook = ipt->tcfi_hook,
		.pf = NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.state = &state;
	par.target = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}

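/*
 * Dump the action.  The target is copied so that the user-visible name can
 * be restored from the kernel target before it is put on the skb, rather
 * than exposing the kernel-internal pointer stored in u.kernel.target.
 */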
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * to be foolproof, do not assume this
	 */

	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = ipt->tcf_bindcnt - bind;
	c.refcnt = ipt->tcf_refcnt - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	kfree(t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ipt_ops = {
	.kind		= "ipt",
	.type		= TCA_ACT_IPT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_ipt_init,
	.walk		= tcf_ipt_walker,
	.lookup		= tcf_ipt_search,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};

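/*
 * The "xt" action is a second flavor provided by this module.  It shares
 * tcf_ipt() and tcf_ipt_dump() with "ipt" but keeps its own per-netns
 * action table, keyed by xt_net_id.
 */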
static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_xt_ops = {
	.kind		= "xt",
	.type		= TCA_ACT_XT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_xt_init,
	.walk		= tcf_xt_walker,
	.lookup		= tcf_xt_search,
	.size		= sizeof(struct tcf_ipt),
};

static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

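/*
 * Register both action flavors.  A failed registration is only reported;
 * the module still loads as long as at least one of the two succeeded.
 */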
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0)
		return ret1;
	else
		return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);