/*********************************************************************
 *
 * Filename:      irlan_eth.c
 * Version:
 * Description:
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Thu Oct 15 08:37:58 1998
 * Modified at:   Tue Mar 21 09:06:41 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Sources:       skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
 *                slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
 *                Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
 *
 *     Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <net/arp.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irlan_common.h>
#include <net/irda/irlan_client.h>
#include <net/irda/irlan_event.h>
#include <net/irda/irlan_eth.h>

static int  irlan_eth_open(struct net_device *dev);
static int  irlan_eth_close(struct net_device *dev);
static int  irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev);
static void irlan_eth_set_multicast_list(struct net_device *dev);
static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);

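/*
 * Wire the generic netdev callbacks to the IrLAN handlers below:
 * .ndo_start_xmit hands frames to IrTTP, and .ndo_set_multicast_list
 * reprograms the IrLAN multicast/broadcast filters whenever the
 * interface flags or multicast list change.
 */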
static const struct net_device_ops irlan_eth_netdev_ops = {
	.ndo_open		= irlan_eth_open,
	.ndo_stop		= irlan_eth_close,
	.ndo_start_xmit		= irlan_eth_xmit,
	.ndo_get_stats		= irlan_eth_get_stats,
	.ndo_set_multicast_list	= irlan_eth_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Function irlan_eth_setup (dev)
 *
 *    The network device initialization function.
 *
 */
static void irlan_eth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops		= &irlan_eth_netdev_ops;
	dev->destructor		= free_netdev;

	/*
	 * Let's do all queueing in IrTTP instead of this device driver.
	 * Queueing here as well can introduce some strange latency
	 * problems, which we will avoid by setting the queue size to 0.
	 */
	/*
	 * The bugs in IrTTP and IrLAN that created this latency issue
	 * have now been fixed, and we can propagate flow control properly
	 * to the network layer. However, this requires a minimal queue of
	 * packets for the device.
	 * Without flow control, the Tx Queue is 14 (ttp) + 0 (dev) = 14
	 * With flow control, the Tx Queue is 7 (ttp) + 4 (dev) = 11
	 * See irlan_eth_flow_indication()...
	 * Note: this number was chosen somewhat arbitrarily and may need
	 * to be adjusted.
	 * Jean II */
	dev->tx_queue_len = 4;
}

/*
 * Function alloc_irlandev
 *
 *    Allocate network device and control block
 *
 */
struct net_device *alloc_irlandev(const char *name)
{
	return alloc_netdev(sizeof(struct irlan_cb), name,
			    irlan_eth_setup);
}

/*
 * Function irlan_eth_open (dev)
 *
 *    Network device has been opened by user
 *
 */
static int irlan_eth_open(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Ready to play! */
	netif_stop_queue(dev); /* Wait until data link is ready */

	/* We are now open, so time to do some work */
	self->disconnect_reason = 0;
	irlan_client_wakeup(self, self->saddr, self->daddr);

	/* Make sure we have a hardware address before we return,
	   so DHCP clients get happy */
	return wait_event_interruptible(self->open_wait,
					!self->tsap_data->connected);
}

/*
 * Function irlan_eth_close (dev)
 *
 *    Stop the Ethernet network device. This function will usually be
 *    called by ifconfig down. We should now disconnect the link. We
 *    start the close timer so that the instance will be removed if we
 *    are unable to discover the remote device after the disconnect.
 */
static int irlan_eth_close(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Stop device */
	netif_stop_queue(dev);

	irlan_close_data_channel(self);
	irlan_close_tsaps(self);

	irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
	irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);

	/* Remove frames queued on the control channel */
	skb_queue_purge(&self->client.txq);

	self->client.tx_busy = 0;

	return 0;
}

/*
 * Function irlan_eth_xmit (skb, dev)
 *
 *    Transmits Ethernet frames over the IrDA link.
 *
 */
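/*
 * Note on the return value: once ndo_start_xmit has taken ownership of
 * the skb it must return NETDEV_TX_OK, even if the frame ends up being
 * dropped. Returning NETDEV_TX_BUSY would make the core requeue an skb
 * that may already have been freed or replaced below.
 */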
static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);
	unsigned int len;
	int ret;

	/* skb headroom large enough to contain all IrDA-headers? */
	if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, self->max_header_size);

		/* We have to free the original skb anyway */
		dev_kfree_skb(skb);

		/* Did the realloc succeed? */
		if (new_skb == NULL)
			return NETDEV_TX_OK;

		/* Use the new skb instead */
		skb = new_skb;
	}

	dev->trans_start = jiffies;

	/* Remember the length now; once the skb has been handed to IrTTP
	 * it may be freed at any time, so we must not touch it afterwards. */
	len = skb->len;

	/* Now queue the packet in the transport layer */
	if (self->use_udata)
		ret = irttp_udata_request(self->tsap_data, skb);
	else
		ret = irttp_data_request(self->tsap_data, skb);

	if (ret < 0) {
		/*
		 * IrTTP's tx queue is full, so we just have to
		 * drop the frame! You might think that we should
		 * just return -1 and not deallocate the frame,
		 * but that is dangerous since it's possible that
		 * we have replaced the original skb with a new
		 * one with larger headroom, and that would really
		 * confuse do_dev_queue_xmit() in dev.c! I have
		 * tried :-) DB
		 */
		/* irttp_data_request already freed the packet */
		self->stats.tx_dropped++;
	} else {
		self->stats.tx_packets++;
		self->stats.tx_bytes += len;
	}

	return NETDEV_TX_OK;
}

/*
 * Function irlan_eth_receive (instance, sap, skb)
 *
 *    This function gets the data that is received on the data channel
 *
 */
int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
{
	struct irlan_cb *self = instance;

	if (skb == NULL) {
		++self->stats.rx_dropped;
		return 0;
	}
	if (skb->len < ETH_HLEN) {
		IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
			   __func__, skb->len);
		++self->stats.rx_dropped;
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Adopt this frame! Important to set all these fields since they
	 * might have been previously set by the low level IrDA network
	 * device driver
	 */
	skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */

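	/*
	 * eth_type_trans() has already pulled the Ethernet header off the
	 * skb, so the byte count below covers the payload only.
	 */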
	self->stats.rx_packets++;
	self->stats.rx_bytes += skb->len;

	netif_rx(skb);   /* Eat it! */

	return 0;
}

/*
 * Function irlan_eth_flow (status)
 *
 *    Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by
 *    controlling the queue stop/start.
 *
 * The IrDA link layer has the advantage of having flow control, and
 * IrTTP now properly handles that. Flow controlling the higher layers
 * prevents us from dropping Tx packets here (up to 15% for a TCP
 * socket, more for a UDP socket).
 * Also, this allows us to reduce the overall transmit queue, which means
 * less latency in case of mixed traffic.
 * Jean II
 */
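/*
 * This is presumably registered as the flow_indication callback on the
 * data TSAP: FLOW_STOP from IrTTP pauses the netdev Tx queue, while
 * FLOW_START (or any unrecognised value) restarts it, so a stalled
 * queue can always recover.
 */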
void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct irlan_cb *self;
	struct net_device *dev;

	self = (struct irlan_cb *) instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	dev = self->dev;

	IRDA_ASSERT(dev != NULL, return;);

	IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__,
		   flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
		   netif_running(dev));

	switch (flow) {
	case FLOW_STOP:
		/* IrTTP is full, stop higher layers */
		netif_stop_queue(dev);
		break;
	case FLOW_START:
	default:
		/* Tell upper layers that it's time to transmit frames again */
		/* Schedule network layer */
		netif_wake_queue(dev);
		break;
	}
}

/*
 * Function set_multicast_list (dev)
 *
 *    Configure the filtering of the device
 *
 */
#define HW_MAX_ADDRS 4 /* Must query to get it! */
static void irlan_eth_set_multicast_list(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Check if data channel has been connected yet */
	if (self->client.state != IRLAN_DATA) {
		IRDA_DEBUG(1, "%s(), delaying!\n", __func__);
		return;
	}

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
	} else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
		/* Disable promiscuous mode, use normal mode. */
		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
		/* hardware_set_filter(NULL); */

		irlan_set_multicast_filter(self, TRUE);
	} else if (dev->mc_count) {
		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
		/* Walk the address list, and load the filter */
		/* hardware_set_filter(dev->mc_list); */

		irlan_set_multicast_filter(self, TRUE);
	} else {
		IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__);
		irlan_set_multicast_filter(self, FALSE);
	}

	if (dev->flags & IFF_BROADCAST)
		irlan_set_broadcast_filter(self, TRUE);
	else
		irlan_set_broadcast_filter(self, FALSE);
}

/*
 * Function irlan_eth_get_stats (dev)
 *
 *    Get the current statistics for this device
 *
 */
static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	return &self->stats;
}