/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/reboot.h>

#define DRV_NAME	"rionet"
#define DRV_VERSION	"0.3"
#define DRV_AUTHOR	"Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC	"Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV | \
			 NETIF_MSG_LINK | \
			 NETIF_MSG_RX_ERR | \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS		8
#define RIONET_MSG_SIZE		RIO_MAX_MSG_SIZE
#define RIONET_MAX_MTU		(RIONET_MSG_SIZE - ETH_HLEN)

struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
	bool open;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};

struct rionet_net {
	struct net_device *ndev;
	struct list_head peers;
	spinlock_t lock;	/* net info access lock */
	struct rio_dev **active;
	int nact;		/* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

#define is_rionet_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->src_ops, dev->dst_ops)

#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))

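/*
 * Drain the inbound mailbox: hand each received message buffer to the
 * network stack and update RX statistics.  Returns the next slot to refill.
 */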
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}

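/* Allocate fresh receive skbs and post them to the inbound mailbox ring. */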
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}

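/*
 * Queue one skb to a specific RapidIO peer via the outbound mailbox and
 * account for it in the TX ring; stop the queue when the ring fills.
 */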
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}

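/*
 * Transmit entry point.  Multicast/broadcast frames are replicated to every
 * active peer; unicast frames are routed to the destID encoded in the
 * destination MAC address.
 */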
static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;
	int add_num = 1;

	spin_lock_irqsave(&rnet->tx_lock, flags);

	if (is_multicast_ether_addr(eth->h_dest))
		add_num = nets[rnet->mport->id].nact;

	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (is_multicast_ether_addr(eth->h_dest)) {
		int count = 0;

		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
				i++)
			if (nets[rnet->mport->id].active[i]) {
				rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[i]);
				if (count)
					atomic_inc(&skb->users);
				count++;
			}
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (nets[rnet->mport->id].active[destid])
			rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[destid]);
		else {
			/*
			 * If the target device was removed from the list of
			 * active peers but we still have TX packets targeting
			 * it just report sending a packet to the target
			 * (without actual packet transfer).
			 */
			dev_kfree_skb_any(skb);
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return NETDEV_TX_OK;
}

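/*
 * Inbound doorbell handler: a JOIN adds the sending peer to the active table
 * (and is acknowledged with a JOIN back), a LEAVE removes it.
 */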
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!nets[netid].active[sid]) {
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node) {
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;
				}
			}
			spin_unlock(&nets[netid].lock);

			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}

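/* Inbound mailbox event: clean completed RX buffers and refill the ring. */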
static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}

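/*
 * Outbound mailbox event: free skbs whose transmission has completed and
 * wake the queue once TX ring space is available again.
 */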
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}

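/*
 * Open the interface: claim doorbell and mailbox resources, prime the RX
 * ring, then send a JOIN doorbell to every known peer.
 */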
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer;
	struct rionet_private *rnet = netdev_priv(ndev);
	unsigned char netid = rnet->mport->id;
	unsigned long flags;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		/* Send a join message */
		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);
	rnet->open = true;

out:
	return rc;
}

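/*
 * Close the interface: notify peers with a LEAVE doorbell, free RX skbs and
 * release the doorbell/mailbox resources.
 */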
static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;
	unsigned long flags;
	int i;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	rnet->open = false;

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (nets[netid].active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			nets[netid].active[peer->rdev->destid] = NULL;
		}
		if (peer->res)
			rio_release_outb_dbell(peer->rdev, peer->res);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}

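/*
 * Subsystem interface remove callback: drop a departing RapidIO device from
 * the peer list, sending it a LEAVE doorbell if it is still reachable.
 */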
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING) {
					rio_send_doorbell(rdev,
							RIONET_DOORBELL_LEAVE);
				}
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	if (found) {
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}

static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
	strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rnet->msg_enable = value;
}

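/* Limit the MTU to what fits in a single RapidIO message payload. */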
static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) {
		printk(KERN_ERR "%s: Invalid MTU size %d\n",
		       ndev->name, new_mtu);
		return -EINVAL;
	}
	ndev->mtu = new_mtu;
	return 0;
}

static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open		= rionet_open,
	.ndo_stop		= rionet_close,
	.ndo_start_xmit		= rionet_start_xmit,
	.ndo_change_mtu		= rionet_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

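/*
 * One-time per-mport setup: allocate the active-peer table, derive the MAC
 * address from the local RapidIO destID and register the net_device.
 */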
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
	int rc = 0;
	struct rionet_private *rnet;
	u16 device_id;
	const size_t rionet_active_bytes = sizeof(void *) *
				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

	nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
						get_order(rionet_active_bytes));
	if (!nets[mport->id].active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;
	rnet->open = false;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	ndev->dev_addr[0] = 0x00;
	ndev->dev_addr[1] = 0x01;
	ndev->dev_addr[2] = 0x00;
	ndev->dev_addr[3] = 0x01;
	ndev->dev_addr[4] = device_id >> 8;
	ndev->dev_addr[5] = device_id & 0xff;

	ndev->netdev_ops = &rionet_netdev_ops;
	ndev->mtu = RIONET_MAX_MTU;
	ndev->features = NETIF_F_LLTX;
	SET_NETDEV_DEV(ndev, &mport->dev);
	ndev->ethtool_ops = &rionet_ethtool_ops;

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0) {
		free_pages((unsigned long)nets[mport->id].active,
			   get_order(rionet_active_bytes));
		goto out;
	}

	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
	       ndev->name,
	       DRV_NAME,
	       DRV_DESC,
	       DRV_VERSION,
	       ndev->dev_addr,
	       mport->name);

out:
	return rc;
}

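/*
 * Subsystem interface add callback: on the first device of a net, create and
 * register the netdev for the host port; then add any mailbox/doorbell
 * capable remote device to the peer list.
 */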
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * If first time through this net, make sure local device is rionet
	 * capable and setup netdev (this step will be skipped in later probes
	 * on the same net).
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (ndev == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		peer->res = rio_request_outb_dbell(peer->rdev,
						RIONET_DOORBELL_JOIN,
						RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If netdev is already opened, send join request to new peer */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}

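/*
 * Reboot notifier: tell all active peers that this node is going away so
 * they drop it from their active tables.
 */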
static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rionet_peer *peer;
	unsigned long flags;
	int i;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	for (i = 0; i < RIONET_MAX_NETS; i++) {
		if (!nets[i].ndev)
			continue;

		spin_lock_irqsave(&nets[i].lock, flags);
		list_for_each_entry(peer, &nets[i].peers, node) {
			if (nets[i].active[peer->rdev->destid]) {
				rio_send_doorbell(peer->rdev,
						  RIONET_DOORBELL_LEAVE);
				nets[i].active[peer->rdev->destid] = NULL;
			}
		}
		spin_unlock_irqrestore(&nets[i].lock, flags);
	}

	return NOTIFY_DONE;
}

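/*
 * Local mport removal: unregister the netdev for this mport and free the
 * active-peer table.
 */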
static void rionet_remove_mport(struct device *dev,
				struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
			   RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}

#ifdef MODULE
static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
#endif

static struct subsys_interface rionet_interface = {
	.name		= "rionet",
	.subsys		= &rio_bus_type,
	.add_dev	= rionet_add_dev,
	.remove_dev	= rionet_remove_dev,
};

static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = NULL,
	.remove_dev = rionet_remove_mport,
};

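/*
 * Module init: register the reboot notifier, the mport class interface and
 * the RapidIO subsystem interface that drives per-device add/remove.
 */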
static int __init rionet_init(void)
{
	int ret;

	ret = register_reboot_notifier(&rionet_notifier);
	if (ret) {
		pr_err("%s: failed to register reboot notifier (err=%d)\n",
		       DRV_NAME, ret);
		return ret;
	}

	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		pr_err("%s: class_interface_register error: %d\n",
		       DRV_NAME, ret);
		return ret;
	}

	return subsys_interface_register(&rionet_interface);
}

static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}

late_initcall(rionet_init);
module_exit(rionet_exit);