drivers/uwb/i1480/i1480u-wlp/rx.c
/*
 * WUSB Wire Adapter: WLP interface
 * Driver for the Linux Network stack.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * i1480u's RX handling is simple. The i1480u sends received network
 * packets broken up into fragments; 1 to N fragments make a packet.
 * We assemble them and deliver the packet with netif_rx().
 *
 * Because each USB transfer carries a *single* fragment (except when
 * the transfer contains a first fragment), each URB completion
 * delivers one or two fragments. So we queue N URBs, each with its
 * own fragment buffer. When a URB completes, we process it (appending
 * from the fragment buffer to the current skb until the packet is
 * complete). Once processed, we requeue the URB. There is always a
 * bunch of URBs ready to take data, so the gap between transfers
 * should be minimal.
 *
 * A URB's transfer buffer is the data field of a socket buffer. This
 * reduces copying, as data can be passed directly to the network
 * layer. If a complete packet or a 1st fragment is received, the
 * URB's transfer buffer is taken away from it and used to send data
 * to the network layer. In this case a new transfer buffer is
 * allocated to the URB before it is requeued. If a "NEXT" or "LAST"
 * fragment is received, the fragment contents are appended to the RX
 * packet under construction and the transfer buffer is reused. To be
 * able to use this buffer to assemble complete packets, we set each
 * buffer's size to that of the largest Ethernet packet that can be
 * received. There is thus room for improvement in memory usage.
 *
 * When the max TX fragment size increases, we should be able to read
 * data into the skbs directly with very simple code.
 *
 * ROADMAP:
 *
 * ENTRY POINTS:
 *
 *   i1480u_rx_setup(): setup RX context [from i1480u_open()]
 *
 *   i1480u_rx_release(): release RX context [from i1480u_stop()]
 *
 *   i1480u_rx_cb(): called when the RX USB URB receives a
 *                   packet. It removes the header and pushes it up
 *                   the Linux netdev stack with netif_rx().
 *
 *     i1480u_rx_buffer()
 *       i1480u_drop() and i1480u_fix()
 *       i1480u_skb_deliver()
 */
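
/*
 * Illustrative sketch only, inferred from the parsing code in
 * i1480u_rx_buffer() below (field names match the structures it uses;
 * sizes are indicative, not normative):
 *
 *   complete packet: [untd_hdr_cmp][wlp_rx_hdr][payload]
 *                    untd_hdr->len covers WLP header + payload
 *   1st fragment:    [untd_hdr_1st][wlp_rx_hdr][payload]
 *                    untd_hdr->len is the total packet size,
 *                    untd_hdr_1st->fragment_len this fragment's size
 *   next/last:       [untd_hdr_rst][payload]
 *                    untd_hdr->len is this fragment's size
 */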

#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "i1480u-wlp.h"

/*
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
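		/* Reserve 2 bytes of headroom so the IP header lands on a
		 * 16-byte boundary after the 14-byte Ethernet header; this
		 * is also why the URB transfer length below is the buffer
		 * size minus 2. */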
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			i1480u_rx_cb, rx_buf);
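		/* GFP_NOIO here presumably keeps the initial submission safe
		 * on paths where triggering block I/O for reclaim would be
		 * unwelcome (e.g. resume); resubmissions from the completion
		 * callback use GFP_ATOMIC instead. */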
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}


/* Release the resources associated with the RX context */
void i1480u_rx_release(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].data)
			dev_kfree_skb(i1480u->rx_buf[cnt].data);
		if (i1480u->rx_buf[cnt].urb) {
			usb_kill_urb(i1480u->rx_buf[cnt].urb);
			usb_free_urb(i1480u->rx_buf[cnt].urb);
		}
	}
	if (i1480u->rx_skb != NULL)
		dev_kfree_skb(i1480u->rx_skb);
}
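
/*
 * Illustrative wiring only, per the ROADMAP comment above: the netdev
 * open/stop handlers (implemented elsewhere in this driver) are
 * expected to call into this file roughly as
 *
 *	i1480u_open(net_dev)  ->  i1480u_rx_setup(i1480u);
 *	i1480u_stop(net_dev)  ->  i1480u_rx_release(i1480u);
 */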

static
void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].urb)
			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
	}
}

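/*
 * Both helper macros below run from URB completion (atomic) context:
 * error messages are rate-limited, and i1480u_fix() frees the
 * half-assembled skb with dev_kfree_skb_irq(), the variant that is
 * safe when called in interrupt context.
 */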
/* Toss an in-progress packet so reassembly can restart cleanly */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)


/* Drop a packet: log the reason and bump the netdev drop counter */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->net_dev->stats.rx_dropped++;		\
} while (0)

/*
 * Finalize the skb setup and deliver it
 *
 * We first pass the incoming frame to the WLP substack for
 * verification. It may also be a WLP association frame, in which case
 * WLP takes over the processing. If WLP does not take it over, it
 * still verifies it; if the frame is invalid, the skb is freed by WLP
 * and we do not continue parsing.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;

	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}


/*
 * Process a buffer of data received from the USB RX endpoint
 *
 * A first fragment arrives together with the next or last fragment;
 * all other fragments arrive alone.
 *
 * /me hates long functions.
 */
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		    "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %zu bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);

	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed "
				    "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) { /* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) { /* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) { /* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					   "sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
				- i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
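			/* Account for everything received so far, then strip
			 * the UNTD and WLP headers so skb->data points at the
			 * start of the network payload. */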
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL;	/* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
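			/* Copy this fragment's payload onto the tail of the
			 * packet under construction; this URB's own buffer is
			 * then reused for the next transfer. */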
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			dev_dbg(dev, "Lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: out-of-sequence CMP "
					   "fragment! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* skb handed off to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* Recreate the RX buffer if needed */
	if (rx_buf->data == NULL) {
		/* Its skb is in use (packet under construction or already
		 * delivered); get a fresh buffer for this URB. */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
}


/*
 * Called when an RX URB has finished receiving or has found some kind
 * of error condition.
 *
 * LIMITATIONS:
 *
 *  - We read USB transfers; each transfer contains a SINGLE fragment
 *    (it can be a complete packet, or a 1st, next, or last fragment
 *    of a packet).
 *    Looks like a transfer can contain more than one fragment (07/18/06).
 *
 *  - Each transfer buffer is the size of the maximum packet size
 *    (minus headroom), i1480u_MAX_RX_PKT_SIZE - 2.
 *
 *  - We always read the full USB transfer, no partials.
 *
 *  - Each transfer is read directly into an skb. This skb will be used
 *    to send data to the upper layers if it is the first fragment or a
 *    complete packet. In the other cases the data will be copied from
 *    the skb to another skb that is being prepared for the upper
 *    layers from a previous first fragment.
 *
 * It is simply too much of a pain. Gosh, there should be a unified
 * SG infrastructure for *everything* [so that I could declare a SG
 * buffer, pass it to USB for receiving, append some space to it if
 * I wish, receive more until I have the whole chunk, adapt
 * pointers on each fragment to remove hardware headers and then
 * attach that to an skbuff and netif_rx()].
 */
void i1480u_rx_cb(struct urb *urb)
{
	int result;
	int do_parse_buffer = 1;
	struct i1480u_rx_buf *rx_buf = urb->context;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;
	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "RX URB[%u]: going down %d\n",
			rx_buf_idx, urb->status);
		goto error;
	default:
		dev_err(dev, "RX URB[%u]: unknown status %d\n",
			rx_buf_idx, urb->status);
		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: max acceptable errors exceeded,"
				" resetting device.\n");
			i1480u_rx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
			goto error;
		}
		do_parse_buffer = 0;
		break;
	}
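	/* The device lock appears to serialize the reassembly state
	 * (rx_skb, rx_untd_pkt_size, ...) touched by i1480u_rx_buffer()
	 * against other contexts; irqsave keeps the locking safe no
	 * matter what context the completion runs in. */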
	spin_lock_irqsave(&i1480u->lock, flags);
	/* chew the data fragments, extract network packets */
	if (do_parse_buffer) {
		i1480u_rx_buffer(rx_buf);
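		/* rx_buf->data is NULL only if i1480u_rx_buffer() kept the
		 * skb and could not allocate a replacement; in that case
		 * the URB is not requeued and this RX slot stays idle. */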
		if (rx_buf->data) {
			rx_buf->urb->transfer_buffer = rx_buf->data->data;
			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
					rx_buf_idx, result);
			}
		}
	}
	spin_unlock_irqrestore(&i1480u->lock, flags);
error:
	return;
}