/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>

/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		u64		tid;
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	struct in_addr		ms_addr_ip4;
	struct in_addr		peer_addr_ip4;

	struct sock		*sk;
	struct net_device	*dev;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};

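/* For orientation (a summary, not additional state): every pdp_ctx sits in
 * two per-device hash tables at once -- hlist_tid, keyed by the GTPv0 TID or
 * the GTPv1 incoming TEI and used on the decapsulation path, and hlist_addr,
 * keyed by the mobile subscriber's IPv4 address and used on the transmit
 * path.
 */
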
/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct sock		*sk0;
	struct sock		*sk1u;

	struct net_device	*dev;

	unsigned int		role;
	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;

static void pdp_context_delete(struct pdp_ctx *pctx);

static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}

/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}

static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms_addr_ip4.s_addr;
	else
		return iph->saddr == pctx->ms_addr_ip4.s_addr;
}

/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			 unsigned int hdrlen, unsigned int role)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	}
	return false;
}

static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
		  unsigned int hdrlen, unsigned int role)
{
	struct pcpu_sw_netstats *stats;

	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
		return -1;

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);

	skb->dev = pctx->dev;

	stats = this_cpu_ptr(pctx->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);
	return 0;
}

/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these bits is set, the optional four byte field is
	 * present, so account for it in the header length.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

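/* For reference -- a rough sketch of the GTPv1-U header the parser above
 * assumes (see struct gtp1_header in <net/gtp.h> and 3GPP TS 29.060): an
 * 8 byte mandatory part (flags, message type, length, TEI), optionally
 * followed by 4 more bytes (sequence number, N-PDU number and next
 * extension header type) whenever any of the S, PN or E flag bits in
 * GTP1_F_MASK is set, which is why hdrlen grows by four above.
 */
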
static void gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	gtp = rcu_dereference_sk_user_data(sk);
	if (gtp) {
		udp_sk(sk)->encap_type = 0;
		rcu_assign_sk_user_data(sk, NULL);
		sock_put(sk);
	}
}

static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;

	gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	gtp_encap_disable_sock(gtp->sk0);
	gtp_encap_disable_sock(gtp->sk1u);
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	switch (udp_sk(sk)->encap_type) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}

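/* Receive path in short (an illustrative summary, not additional logic):
 * the UDP tunnel core calls gtp_encap_recv() for every packet arriving on
 * one of the encapsulation sockets; the version specific helpers above look
 * up the PDP context by TID/TEI, gtp_check_ms() verifies that the inner
 * IPv4 address belongs to that context (source address in GGSN role,
 * destination address in SGSN role), and gtp_rx() strips the outer headers
 * and hands the payload to the stack on the gtp device via netif_rx().
 * A return value of 1 passes the packet to the owning userspace UDP socket
 * instead.
 */
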
static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp->dev = dev;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	free_percpu(dev->tstats);
}

static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= inet_sk(sk)->inet_saddr;
	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}

static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}

static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2  1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1  1  1  0  0  0
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}

struct gtp_pktinfo {
	struct sock		*sk;
	struct iphdr		*iph;
	struct flowi4		fl4;
	struct rtable		*rt;
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__be16			gtph_port;
};

static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}

static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->iph	= iph;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}

static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	skb_dst_drop(skb);

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);

	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
	    mtu < ntohs(iph->tot_len)) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}

static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    true, false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

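/* Transmit path in short (again only a summary of the code above): the
 * stack hands an IPv4 packet to gtp_dev_xmit(), gtp_build_skb_ip4() looks
 * up the PDP context by the inner destination address (source address in
 * SGSN role), routes towards pctx->peer_addr_ip4, pushes the GTPv0 or
 * GTPv1 header, and udp_tunnel_xmit_skb() sends the result over the
 * context's UDP socket to GTP0_PORT or GTP1U_PORT respectively.
 */
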
static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init		= gtp_dev_init,
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
};

static void gtp_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;

	dev->hard_header_len = 0;
	dev->addr_len = 0;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->features	|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	/* Assume largest header, ie. GTPv0. */
	dev->needed_headroom	= LL_MAX_HEADER +
				  sizeof(struct iphdr) +
				  sizeof(struct udphdr) +
				  sizeof(struct gtp0_header);
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);

static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct gtp_dev *gtp;
	struct gtp_net *gn;
	int hashsize, err;

	if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
		return -EINVAL;

	gtp = netdev_priv(dev);

	err = gtp_encap_enable(gtp, data);
	if (err < 0)
		return err;

	if (!data[IFLA_GTP_PDP_HASHSIZE])
		hashsize = 1024;
	else
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		goto out_encap;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_hashtable;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_hashtable:
	gtp_hashtable_free(gtp);
out_encap:
	gtp_encap_disable(gtp);
	return err;
}

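/* Link creation is expected to be driven from userspace (a GTP-C daemon):
 * the daemon opens and binds the UDP sockets itself -- conventionally port
 * 3386 for GTPv0-U and 2152 for GTPv1-U -- and passes the file descriptors
 * in the IFLA_GTP_FD0 / IFLA_GTP_FD1 attributes of the RTM_NEWLINK request,
 * optionally together with IFLA_GTP_PDP_HASHSIZE and IFLA_GTP_ROLE.
 */
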
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	gtp_hashtable_free(gtp);
	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}

static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
};

static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	return 0;
}

static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32));	/* IFLA_GTP_PDP_HASHSIZE */
}

static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}

static void gtp_hashtable_free(struct gtp_dev *gtp)
{
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	synchronize_rcu();
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}

static struct sock *gtp_encap_enable_socket(int fd, int type,
					    struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock;
	struct sock *sk;
	int err;

	pr_debug("enable gtp on %d, %d\n", fd, type);

	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		pr_debug("gtp socket fd=%d not found\n", fd);
		return NULL;
	}

	if (sock->sk->sk_protocol != IPPROTO_UDP) {
		pr_debug("socket fd=%d not UDP\n", fd);
		sk = ERR_PTR(-EINVAL);
		goto out_sock;
	}

	if (rcu_dereference_sk_user_data(sock->sk)) {
		sk = ERR_PTR(-EBUSY);
		goto out_sock;
	}

	sk = sock->sk;
	sock_hold(sk);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_sock:
	sockfd_put(sock);
	return sk;
}

static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;
	unsigned int role = GTP_ROLE_GGSN;

	if (data[IFLA_GTP_FD0]) {
		u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
		if (IS_ERR(sk0))
			return PTR_ERR(sk0);
	}

	if (data[IFLA_GTP_FD1]) {
		u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
		if (IS_ERR(sk1u)) {
			if (sk0)
				gtp_encap_disable_sock(sk0);
			return PTR_ERR(sk1u);
		}
	}

	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN)
			return -EINVAL;
	}

	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;
	gtp->role = role;

	return 0;
}

static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
	struct gtp_dev *gtp = NULL;
	struct net_device *dev;
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (nla[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	if (IS_ERR(net))
		return NULL;

	/* Check if there's an existing gtpX device to configure */
	dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
	if (dev && dev->netdev_ops == &gtp_netdev_ops)
		gtp = netdev_priv(dev);

	put_net(net);
	return gtp;
}

static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->peer_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}

static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
			struct genl_info *info)
{
	struct net_device *dev = gtp->dev;
	u32 hash_ms, hash_tid = 0;
	struct pdp_ctx *pctx;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;

	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
			found = true;
			break;
		}
	}

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return 0;
	}

	pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
	if (pctx == NULL)
		return -ENOMEM;

	sock_hold(sk);
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead, I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->peer_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return 0;
}

static void pdp_context_free(struct rcu_head *head)
{
	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

	sock_put(pctx->sk);
	kfree(pctx);
}

static void pdp_context_delete(struct pdp_ctx *pctx)
{
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}

static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int version;
	struct gtp_dev *gtp;
	struct sock *sk;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	switch (version) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (version == GTP_V0)
		sk = gtp->sk0;
	else if (version == GTP_V1)
		sk = gtp->sk1u;
	else
		sk = NULL;

	if (!sk) {
		err = -ENODEV;
		goto out_unlock;
	}

	err = ipv4_pdp_add(gtp, sk, info);

out_unlock:
	rcu_read_unlock();
	return err;
}

static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
					    struct nlattr *nla[])
{
	struct gtp_dev *gtp;

	gtp = gtp_find_dev(net, nla);
	if (!gtp)
		return ERR_PTR(-ENODEV);

	if (nla[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

		return ipv4_pdp_find(gtp, ip);
	} else if (nla[GTPA_VERSION]) {
		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

		if (gtp_version == GTP_V0 && nla[GTPA_TID])
			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
	}

	return ERR_PTR(-EINVAL);
}

static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
	struct pdp_ctx *pctx;

	if (nla[GTPA_LINK])
		pctx = gtp_find_pdp_by_link(net, nla);
	else
		pctx = ERR_PTR(-EINVAL);

	if (!pctx)
		pctx = ERR_PTR(-ENOENT);

	return pctx;
}

static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}

static struct genl_family gtp_genl_family;

static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}

static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
				 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}

static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	struct net *net = sock_net(skb->sk);
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	unsigned long tid = cb->args[1];
	int i, k = cb->args[0], ret;
	struct pdp_ctx *pctx;

	if (cb->args[4])
		return 0;

	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = k; i < gtp->hash_size; i++) {
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
				if (tid && tid != pctx->u.tid)
					continue;
				else
					tid = 0;

				ret = gtp_genl_fill_info(skb,
							 NETLINK_CB(cb->skb).portid,
							 cb->nlh->nlmsg_seq,
							 cb->nlh->nlmsg_type, pctx);
				if (ret < 0) {
					cb->args[0] = i;
					cb->args[1] = pctx->u.tid;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
			}
		}
	}
	cb->args[4] = 1;
out:
	return skb->len;
}

static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};

static const struct genl_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.doit = gtp_genl_new_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.doit = gtp_genl_del_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

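/* Userspace view of the generic netlink interface registered below (only a
 * summary of the handlers above, typically driven through a library such
 * as libgtpnl): GTP_CMD_NEWPDP needs GTPA_VERSION, GTPA_LINK,
 * GTPA_PEER_ADDRESS and GTPA_MS_ADDRESS, plus GTPA_TID/GTPA_FLOW for GTPv0
 * or GTPA_I_TEI/GTPA_O_TEI for GTPv1. GTP_CMD_DELPDP and GTP_CMD_GETPDP
 * locate a context via GTPA_LINK and either GTPA_MS_ADDRESS or the version
 * specific TID/TEI. All commands require CAP_NET_ADMIN (GENL_ADMIN_PERM).
 */
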
static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= gtp_genl_ops,
	.n_ops		= ARRAY_SIZE(gtp_genl_ops),
};

static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}

static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};

static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);

static void __exit gtp_fini(void)
{
	unregister_pernet_subsys(&gtp_net_ops);
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");