/*
 * Source: drivers/net/ethernet/ezchip/nps_enet.c
 * (mirror_ubuntu-artful-kernel.git; snapshot at commit
 *  "drivers: add explicit interrupt.h includes")
 */
1 /*
2 * Copyright(c) 2015 EZchip Technologies.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 */
16
17 #include <linux/module.h>
18 #include <linux/etherdevice.h>
19 #include <linux/interrupt.h>
20 #include <linux/of_address.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_net.h>
23 #include <linux/of_platform.h>
24 #include "nps_enet.h"
25
26 #define DRV_NAME "nps_mgt_enet"
27
28 static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv)
29 {
30 u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
31 u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
32
33 return (!tx_ctrl_ct && priv->tx_skb);
34 }
35
36 static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
37 {
38 struct nps_enet_priv *priv = netdev_priv(ndev);
39 u32 i, len = DIV_ROUND_UP(frame_len, sizeof(u32));
40
41 /* Empty Rx FIFO buffer by reading all words */
42 for (i = 0; i < len; i++)
43 nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
44 }
45
/* Copy @length bytes of a received frame from the Rx FIFO into @dst.
 *
 * The FIFO is drained in 32-bit words: when @dst is word-aligned the
 * words are streamed straight in with ioread32_rep(); otherwise each
 * word is read individually and stored with put_unaligned_be32().  A
 * trailing partial word (length not a multiple of 4) is read once and
 * copied byte-wise.
 *
 * NOTE(review): the aligned path stores raw ioread32 values while the
 * unaligned path stores big-endian words — presumably equivalent on
 * this (big-endian NPS) platform; confirm before reusing elsewhere.
 */
static void nps_enet_read_rx_fifo(struct net_device *ndev,
				  unsigned char *dst, u32 length)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 i, last = length & (sizeof(u32) - 1);	/* leftover tail bytes */
	u32 *reg = (u32 *)dst, len = length / sizeof(u32);
	bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));

	/* In case dst is not aligned we need an intermediate buffer */
	if (dst_is_aligned) {
		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
		reg += len;
	} else { /* !dst_is_aligned */
		for (i = 0; i < len; i++, reg++) {
			u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);

			put_unaligned_be32(buf, reg);
		}
	}
	/* copy last bytes (if any) */
	if (last) {
		u32 buf;

		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
		memcpy((u8 *)reg, &buf, last);
	}
}
73
/* Handle one received frame, if any.
 *
 * Reads RX_CTL once and decodes: CR (frame ready), ER (Rx error),
 * CRC (CRC error) and NR (frame length).  On error or allocation
 * failure the frame is drained from the FIFO; otherwise it is copied
 * into a fresh skb and passed up the stack.  RX_CTL is acked (written
 * to 0) in both cases so HW can deliver the next frame.
 *
 * returns: number of frames processed (0 or 1).
 */
static u32 nps_enet_rx_handler(struct net_device *ndev)
{
	u32 frame_len, err = 0;
	u32 work_done = 0;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
	u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT;
	u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT;

	frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT;

	/* Check if we got RX */
	if (!rx_ctrl_cr)
		return work_done;

	/* If we got here there is a work for us */
	work_done++;

	/* Check Rx error */
	if (rx_ctrl_er) {
		ndev->stats.rx_errors++;
		err = 1;
	}

	/* Check Rx CRC error */
	if (rx_ctrl_crc) {
		ndev->stats.rx_crc_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	/* Check Frame length Min 64b */
	if (unlikely(frame_len < ETH_ZLEN)) {
		ndev->stats.rx_length_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	if (err)
		goto rx_irq_clean;

	/* Skb allocation */
	skb = netdev_alloc_skb_ip_align(ndev, frame_len);
	if (unlikely(!skb)) {
		/* skb allocation failed: count it and drop the frame */
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
		goto rx_irq_clean;
	}

	/* Copy frame from Rx fifo into the skb */
	nps_enet_read_rx_fifo(ndev, skb->data, frame_len);

	skb_put(skb, frame_len);
	skb->protocol = eth_type_trans(skb, ndev);
	/* HW strips/validates the CRC (CFG_0_RX_CRC_STRIP is set),
	 * so no SW checksum validation is needed.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += frame_len;
	netif_receive_skb(skb);

	goto rx_irq_frame_done;

rx_irq_clean:
	/* Clean Rx fifo */
	nps_enet_clean_rx_fifo(ndev, frame_len);

rx_irq_frame_done:
	/* Ack Rx ctrl register */
	nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);

	return work_done;
}
148
/* Handle a Tx completion, if one is pending.
 *
 * TX_CTL is read *before* the pending check so the ET (error) and NT
 * (byte count) fields are captured from the same register snapshot.
 * The register is acked (written to 0) before stats are updated, then
 * the in-flight skb is released and the queue woken (start_xmit stops
 * the queue for every frame, as this HW sends one frame at a time).
 */
static void nps_enet_tx_handler(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
	u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
	u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;

	/* Check if we got TX */
	if (!nps_enet_is_tx_pending(priv))
		return;

	/* Ack Tx ctrl register */
	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);

	/* Check Tx transmit error */
	if (unlikely(tx_ctrl_et)) {
		ndev->stats.tx_errors++;
	} else {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += tx_ctrl_nt;
	}

	dev_kfree_skb(priv->tx_skb);
	priv->tx_skb = NULL;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}
177
178 /**
179 * nps_enet_poll - NAPI poll handler.
180 * @napi: Pointer to napi_struct structure.
181 * @budget: How many frames to process on one call.
182 *
183 * returns: Number of processed frames
184 */
/**
 * nps_enet_poll - NAPI poll handler.
 * @napi: Pointer to napi_struct structure.
 * @budget: How many frames to process on one call.
 *
 * returns: Number of processed frames
 *
 * Handles any pending Tx completion first, then at most one Rx frame.
 * When under budget, NAPI is completed and both interrupt sources are
 * re-enabled; a final Tx-pending re-check guards against the lost-edge
 * race described inline below.
 */
static int nps_enet_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 work_done;

	nps_enet_tx_handler(ndev);
	work_done = nps_enet_rx_handler(ndev);
	if (work_done < budget) {
		u32 buf_int_enable_value = 0;

		napi_complete_done(napi, work_done);

		/* set tx_done and rx_rdy bits */
		buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
		buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;

		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
				 buf_int_enable_value);

		/* in case we will get a tx interrupt while interrupts
		 * are masked, we will lose it since the tx is edge interrupt.
		 * specifically, while executing the code section above,
		 * between nps_enet_tx_handler and the interrupts enable, all
		 * tx requests will be stuck until we will get an rx interrupt.
		 * the two code lines below will solve this situation by
		 * re-adding ourselves to the poll list.
		 */
		if (nps_enet_is_tx_pending(priv)) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			napi_reschedule(napi);
		}
	}

	return work_done;
}
221
222 /**
223 * nps_enet_irq_handler - Global interrupt handler for ENET.
224 * @irq: irq number.
225 * @dev_instance: device instance.
226 *
227 * returns: IRQ_HANDLED for all cases.
228 *
229 * EZchip ENET has 2 interrupt causes, and depending on bits raised in
230 * CTRL registers we may tell what is a reason for interrupt to fire up.
231 * We got one for RX and the other for TX (completion).
232 */
233 static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
234 {
235 struct net_device *ndev = dev_instance;
236 struct nps_enet_priv *priv = netdev_priv(ndev);
237 u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
238 u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
239
240 if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr)
241 if (likely(napi_schedule_prep(&priv->napi))) {
242 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
243 __napi_schedule(&priv->napi);
244 }
245
246 return IRQ_HANDLED;
247 }
248
249 static void nps_enet_set_hw_mac_address(struct net_device *ndev)
250 {
251 struct nps_enet_priv *priv = netdev_priv(ndev);
252 u32 ge_mac_cfg_1_value = 0;
253 u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
254
255 /* set MAC address in HW */
256 ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT;
257 ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT;
258 ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT;
259 ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT;
260 *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK)
261 | ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT;
262 *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK)
263 | ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT;
264
265 nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
266 ge_mac_cfg_1_value);
267
268 nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
269 *ge_mac_cfg_2_value);
270 }
271
272 /**
273 * nps_enet_hw_reset - Reset the network device.
274 * @ndev: Pointer to the network device.
275 *
276 * This function reset the PCS and TX fifo.
277 * The programming model is to set the relevant reset bits
278 * wait for some time for this to propagate and then unset
279 * the reset bits. This way we ensure that reset procedure
280 * is done successfully by device.
281 */
282 static void nps_enet_hw_reset(struct net_device *ndev)
283 {
284 struct nps_enet_priv *priv = netdev_priv(ndev);
285 u32 ge_rst_value = 0, phase_fifo_ctl_value = 0;
286
287 /* Pcs reset sequence*/
288 ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
289 nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
290 usleep_range(10, 20);
291 ge_rst_value = 0;
292 nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
293
294 /* Tx fifo reset sequence */
295 phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT;
296 phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT;
297 nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
298 phase_fifo_ctl_value);
299 usleep_range(10, 20);
300 phase_fifo_ctl_value = 0;
301 nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
302 phase_fifo_ctl_value);
303 }
304
/* Configure and enable the MAC: statistics, address filtering, max
 * frame length, interrupt enables, HW MAC address, CRC/padding/IFG/
 * preamble/flow-control features, and finally Rx+Tx enable.
 *
 * The cached cfg_2/cfg_3 shadow values in priv are updated so later
 * partial rewrites (e.g. set_rx_mode, set_hw_mac_address) start from
 * the current register contents.
 */
static void nps_enet_hw_enable_control(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0;
	u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
	u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value;
	s32 max_frame_length;

	/* Enable Rx and Tx statistics */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK)
		 | NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT;

	/* Discard packets with different MAC address */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
		 | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;

	/* Discard multicast packets */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
		 | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
			 *ge_mac_cfg_2_value);

	/* Discard Packets bigger than max frame length */
	max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
		*ge_mac_cfg_3_value =
			 (*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK)
			 | max_frame_length << CFG_3_MAX_LEN_SHIFT;
	}

	/* Enable interrupts */
	buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
	buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
			 buf_int_enable_value);

	/* Write device MAC address to HW */
	nps_enet_set_hw_mac_address(ndev);

	/* Rx and Tx HW features */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT;

	/* IFG configuration */
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT;

	/* preamble configuration */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT;

	/* enable flow control frames */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT;
	*ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK)
		 | NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT;

	/* Enable Rx and Tx */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT;

	/* cfg_3 is written before cfg_0, so filtering/limits are in
	 * place before Rx/Tx actually turn on.
	 */
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
			 *ge_mac_cfg_3_value);
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
			 ge_mac_cfg_0_value);
}
378
/* Quiesce the device: mask both interrupt sources, then clear
 * GE_MAC_CFG_0 which (among other features) turns off Rx and Tx.
 */
static void nps_enet_hw_disable_control(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	/* Disable interrupts */
	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);

	/* Disable Rx and Tx */
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, 0);
}
389
/* Push one frame into the Tx FIFO and trigger transmission.
 *
 * Data is written in 32-bit words; an unaligned skb->data is handled
 * word-by-word via get_unaligned_be32().  The frame length and the CT
 * (start-transmit) bit are then written to TX_CTL in one shot.
 *
 * NOTE(review): `short length` truncates skb->len to 16 bits — safe
 * only while frames are bounded by NPS_ENET_MAX_FRAME_LENGTH (see
 * nps_enet_hw_enable_control); confirm that bound always holds.
 */
static void nps_enet_send_frame(struct net_device *ndev,
				struct sk_buff *skb)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 tx_ctrl_value = 0;
	short length = skb->len;
	u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
	u32 *src = (void *)skb->data;
	bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));

	/* In case src is not aligned we need an intermediate buffer */
	if (src_is_aligned)
		iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len);
	else /* !src_is_aligned */
		for (i = 0; i < len; i++, src++)
			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
					 get_unaligned_be32(src));

	/* Write the length of the Frame */
	tx_ctrl_value |= length << TX_CTL_NT_SHIFT;

	tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
	/* Send Frame */
	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
}
415
416 /**
417 * nps_enet_set_mac_address - Set the MAC address for this device.
418 * @ndev: Pointer to net_device structure.
419 * @p: 6 byte Address to be written as MAC address.
420 *
421 * This function copies the HW address from the sockaddr structure to the
422 * net_device structure and updates the address in HW.
423 *
424 * returns: -EBUSY if the net device is busy or 0 if the address is set
425 * successfully.
426 */
427 static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
428 {
429 struct sockaddr *addr = p;
430 s32 res;
431
432 if (netif_running(ndev))
433 return -EBUSY;
434
435 res = eth_mac_addr(ndev, p);
436 if (!res) {
437 ether_addr_copy(ndev->dev_addr, addr->sa_data);
438 nps_enet_set_hw_mac_address(ndev);
439 }
440
441 return res;
442 }
443
444 /**
445 * nps_enet_set_rx_mode - Change the receive filtering mode.
446 * @ndev: Pointer to the network device.
447 *
448 * This function enables/disables promiscuous mode
449 */
450 static void nps_enet_set_rx_mode(struct net_device *ndev)
451 {
452 struct nps_enet_priv *priv = netdev_priv(ndev);
453 u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value;
454
455 if (ndev->flags & IFF_PROMISC) {
456 ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
457 | NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT;
458 ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
459 | NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT;
460
461 } else {
462 ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
463 | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
464 ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
465 | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
466 }
467
468 nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
469 }
470
471 /**
472 * nps_enet_open - Open the network device.
473 * @ndev: Pointer to the network device.
474 *
475 * returns: 0, on success or non-zero error value on failure.
476 *
477 * This function sets the MAC address, requests and enables an IRQ
478 * for the ENET device and starts the Tx queue.
479 */
/**
 * nps_enet_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the ENET device and starts the Tx queue.
 * Order matters: HW is disabled before the IRQ is requested so no
 * interrupt fires before NAPI is enabled and the MAC is configured.
 */
static s32 nps_enet_open(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 err;

	/* Reset private variables */
	priv->tx_skb = NULL;
	priv->ge_mac_cfg_2_value = 0;
	priv->ge_mac_cfg_3_value = 0;

	/* ge_mac_cfg_3 default values */
	priv->ge_mac_cfg_3_value |=
		 NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT;

	priv->ge_mac_cfg_3_value |=
		 NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT;

	/* Disable HW device */
	nps_enet_hw_disable_control(ndev);

	/* irq Rx allocation */
	err = request_irq(priv->irq, nps_enet_irq_handler,
			  0, "enet-rx-tx", ndev);
	if (err)
		return err;

	napi_enable(&priv->napi);

	/* Enable HW device */
	nps_enet_hw_reset(ndev);
	nps_enet_hw_enable_control(ndev);

	netif_start_queue(ndev);

	return 0;
}
516
517 /**
518 * nps_enet_stop - Close the network device.
519 * @ndev: Pointer to the network device.
520 *
521 * This function stops the Tx queue, disables interrupts for the ENET device.
522 */
/**
 * nps_enet_stop - Close the network device.
 * @ndev: Pointer to the network device.
 *
 * This function stops the Tx queue, disables interrupts for the ENET device.
 * Teardown order: NAPI first (so no poll runs afterwards), then the
 * queue, then HW disable, and finally the IRQ is released.
 */
static s32 nps_enet_stop(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	nps_enet_hw_disable_control(ndev);
	free_irq(priv->irq, ndev);

	return 0;
}
534
535 /**
536 * nps_enet_start_xmit - Starts the data transmission.
537 * @skb: sk_buff pointer that contains data to be Transmitted.
538 * @ndev: Pointer to net_device structure.
539 *
540 * returns: NETDEV_TX_OK, on success
541 * NETDEV_TX_BUSY, if any of the descriptors are not free.
542 *
543 * This function is invoked from upper layers to initiate transmission.
544 */
/**
 * nps_enet_start_xmit - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be Transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 * The HW transmits a single frame at a time, so the queue is stopped
 * for every frame and re-woken from the Tx-done handler.
 */
static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	/* This driver handles one frame at a time  */
	netif_stop_queue(ndev);

	priv->tx_skb = skb;

	/* make sure tx_skb is actually written to the memory
	 * before the HW is informed and the IRQ is fired.
	 */
	wmb();

	nps_enet_send_frame(ndev, skb);

	return NETDEV_TX_OK;
}
564
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the device IRQ masked.
 *
 * Fix: this driver never assigns ndev->irq — the IRQ obtained from
 * platform_get_irq() in probe is stored in priv->irq (request_irq()
 * and free_irq() both use it).  Using ndev->irq here would disable
 * and "handle" IRQ 0 instead of the device's line.
 */
static void nps_enet_poll_controller(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	disable_irq(priv->irq);
	nps_enet_irq_handler(priv->irq, ndev);
	enable_irq(priv->irq);
}
#endif
573
/* net_device callbacks; no multicast filtering support (see probe). */
static const struct net_device_ops nps_netdev_ops = {
	.ndo_open		= nps_enet_open,
	.ndo_stop		= nps_enet_stop,
	.ndo_start_xmit		= nps_enet_start_xmit,
	.ndo_set_mac_address	= nps_enet_set_mac_address,
	.ndo_set_rx_mode        = nps_enet_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nps_enet_poll_controller,
#endif
};
584
585 static s32 nps_enet_probe(struct platform_device *pdev)
586 {
587 struct device *dev = &pdev->dev;
588 struct net_device *ndev;
589 struct nps_enet_priv *priv;
590 s32 err = 0;
591 const char *mac_addr;
592 struct resource *res_regs;
593
594 if (!dev->of_node)
595 return -ENODEV;
596
597 ndev = alloc_etherdev(sizeof(struct nps_enet_priv));
598 if (!ndev)
599 return -ENOMEM;
600
601 platform_set_drvdata(pdev, ndev);
602 SET_NETDEV_DEV(ndev, dev);
603 priv = netdev_priv(ndev);
604
605 /* The EZ NET specific entries in the device structure. */
606 ndev->netdev_ops = &nps_netdev_ops;
607 ndev->watchdog_timeo = (400 * HZ / 1000);
608 /* FIXME :: no multicast support yet */
609 ndev->flags &= ~IFF_MULTICAST;
610
611 res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
612 priv->regs_base = devm_ioremap_resource(dev, res_regs);
613 if (IS_ERR(priv->regs_base)) {
614 err = PTR_ERR(priv->regs_base);
615 goto out_netdev;
616 }
617 dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
618
619 /* set kernel MAC address to dev */
620 mac_addr = of_get_mac_address(dev->of_node);
621 if (mac_addr)
622 ether_addr_copy(ndev->dev_addr, mac_addr);
623 else
624 eth_hw_addr_random(ndev);
625
626 /* Get IRQ number */
627 priv->irq = platform_get_irq(pdev, 0);
628 if (!priv->irq) {
629 dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
630 err = -ENODEV;
631 goto out_netdev;
632 }
633
634 netif_napi_add(ndev, &priv->napi, nps_enet_poll,
635 NPS_ENET_NAPI_POLL_WEIGHT);
636
637 /* Register the driver. Should be the last thing in probe */
638 err = register_netdev(ndev);
639 if (err) {
640 dev_err(dev, "Failed to register ndev for %s, err = 0x%08x\n",
641 ndev->name, (s32)err);
642 goto out_netif_api;
643 }
644
645 dev_info(dev, "(rx/tx=%d)\n", priv->irq);
646 return 0;
647
648 out_netif_api:
649 netif_napi_del(&priv->napi);
650 out_netdev:
651 if (err)
652 free_netdev(ndev);
653
654 return err;
655 }
656
657 static s32 nps_enet_remove(struct platform_device *pdev)
658 {
659 struct net_device *ndev = platform_get_drvdata(pdev);
660 struct nps_enet_priv *priv = netdev_priv(ndev);
661
662 unregister_netdev(ndev);
663 free_netdev(ndev);
664 netif_napi_del(&priv->napi);
665
666 return 0;
667 }
668
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id nps_enet_dt_ids[] = {
	{ .compatible = "ezchip,nps-mgt-enet" },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
674
/* Platform driver registration and module metadata. */
static struct platform_driver nps_enet_driver = {
	.probe = nps_enet_probe,
	.remove = nps_enet_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table  = nps_enet_dt_ids,
	},
};

module_platform_driver(nps_enet_driver);

MODULE_AUTHOR("EZchip Semiconductor");
MODULE_LICENSE("GPL v2");