]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
net: qualcomm: rmnet: Move rmnet_mode to rmnet_port
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / qualcomm / rmnet / rmnet_handlers.c
CommitLineData
ceed73a2
SAK
1/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * RMNET Data ingress/egress handler
13 *
14 */
15
16#include <linux/netdevice.h>
17#include <linux/netdev_features.h>
18#include "rmnet_private.h"
19#include "rmnet_config.h"
20#include "rmnet_vnd.h"
21#include "rmnet_map.h"
22#include "rmnet_handlers.h"
23
24#define RMNET_IP_VERSION_4 0x40
25#define RMNET_IP_VERSION_6 0x60
26
27/* Helper Functions */
28
29static void rmnet_set_skb_proto(struct sk_buff *skb)
30{
31 switch (skb->data[0] & 0xF0) {
32 case RMNET_IP_VERSION_4:
33 skb->protocol = htons(ETH_P_IP);
34 break;
35 case RMNET_IP_VERSION_6:
36 skb->protocol = htons(ETH_P_IPV6);
37 break;
38 default:
39 skb->protocol = htons(ETH_P_MAP);
40 break;
41 }
42}
43
44/* Generic handler */
45
/* Finalize an ingress packet and hand it to the network stack.
 *
 * Expects skb->data to already point at the inner (IP) payload and
 * skb->dev to point at the rmnet virtual device that should receive it.
 * Always consumes the skb.
 */
static rx_handler_result_t
rmnet_deliver_skb(struct sk_buff *skb)
{
	/* Network/transport headers start at the current data pointer */
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	/* presumably updates the virtual device's rx counters —
	 * confirm against rmnet_vnd.c
	 */
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	netif_receive_skb(skb);
	return RX_HANDLER_CONSUMED;
}
58
ceed73a2
SAK
59/* MAP handler */
60
/* Process one de-aggregated MAP frame: dispatch MAP commands, demux the
 * data frame onto the logical endpoint selected by its mux ID, strip the
 * MAP header/padding, and deliver the inner packet. Always consumes skb.
 */
static rx_handler_result_t
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u8 mux_id;
	u16 len;

	/* CD bit set means this is a MAP control command, not data */
	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		/* Commands not enabled on this port: drop silently */
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	/* Payload length = MAP length field minus trailing pad bytes.
	 * NOTE(review): len comes straight from the wire and is not checked
	 * against skb->len before the trim below — confirm the MAP parser
	 * guarantees it.
	 */
	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	ep = &port->muxed_ep[mux_id];

	/* NOTE(review): ep->egress_dev may be NULL if this mux ID was never
	 * configured — confirm against the config path before relying on it.
	 */
	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
		skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	skb_trim(skb, len);
	rmnet_set_skb_proto(skb);
	return rmnet_deliver_skb(skb);
}
97
98static rx_handler_result_t
99rmnet_map_ingress_handler(struct sk_buff *skb,
b665f4f8 100 struct rmnet_port *port)
ceed73a2
SAK
101{
102 struct sk_buff *skbn;
103 int rc;
104
b665f4f8
SAK
105 if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
106 while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
107 __rmnet_map_ingress_handler(skbn, port);
ceed73a2
SAK
108
109 consume_skb(skb);
110 rc = RX_HANDLER_CONSUMED;
111 } else {
b665f4f8 112 rc = __rmnet_map_ingress_handler(skb, port);
ceed73a2
SAK
113 }
114
115 return rc;
116}
117
118static int rmnet_map_egress_handler(struct sk_buff *skb,
b665f4f8 119 struct rmnet_port *port,
ceed73a2
SAK
120 struct rmnet_endpoint *ep,
121 struct net_device *orig_dev)
122{
123 int required_headroom, additional_header_len;
124 struct rmnet_map_header *map_header;
125
126 additional_header_len = 0;
127 required_headroom = sizeof(struct rmnet_map_header);
128
129 if (skb_headroom(skb) < required_headroom) {
130 if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
131 return RMNET_MAP_CONSUMED;
132 }
133
134 map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
135 if (!map_header)
136 return RMNET_MAP_CONSUMED;
137
b665f4f8 138 if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
ceed73a2
SAK
139 if (ep->mux_id == 0xff)
140 map_header->mux_id = 0;
141 else
142 map_header->mux_id = ep->mux_id;
143 }
144
145 skb->protocol = htons(ETH_P_MAP);
146
147 return RMNET_MAP_SUCCESS;
148}
149
150/* Ingress / Egress Entry Points */
151
152/* Processes packet as per ingress data format for receiving device. Logical
153 * endpoint is determined from packet inspection. Packet is then sent to the
154 * egress device listed in the logical endpoint configuration.
155 */
156rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
157{
b665f4f8 158 struct rmnet_port *port;
ceed73a2
SAK
159 struct sk_buff *skb = *pskb;
160 struct net_device *dev;
161 int rc;
162
163 if (!skb)
164 return RX_HANDLER_CONSUMED;
165
166 dev = skb->dev;
b665f4f8 167 port = rmnet_get_port(dev);
ceed73a2 168
d8bbb07a 169 if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
b665f4f8 170 rc = rmnet_map_ingress_handler(skb, port);
ceed73a2
SAK
171
172 return rc;
173}
174
175/* Modifies packet as per logical endpoint configuration and egress data format
176 * for egress device configured in logical endpoint. Packet is then transmitted
177 * on the egress device.
178 */
179void rmnet_egress_handler(struct sk_buff *skb,
180 struct rmnet_endpoint *ep)
181{
ceed73a2 182 struct net_device *orig_dev;
b665f4f8 183 struct rmnet_port *port;
ceed73a2
SAK
184
185 orig_dev = skb->dev;
186 skb->dev = ep->egress_dev;
187
b665f4f8
SAK
188 port = rmnet_get_port(skb->dev);
189 if (!port) {
ceed73a2
SAK
190 kfree_skb(skb);
191 return;
192 }
193
b665f4f8
SAK
194 if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
195 switch (rmnet_map_egress_handler(skb, port, ep, orig_dev)) {
ceed73a2
SAK
196 case RMNET_MAP_CONSUMED:
197 return;
198
199 case RMNET_MAP_SUCCESS:
200 break;
201
202 default:
203 kfree_skb(skb);
204 return;
205 }
206 }
207
91489632 208 rmnet_vnd_tx_fixup(skb, orig_dev);
ceed73a2
SAK
209
210 dev_queue_xmit(skb);
211}