/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et1310_tx.h"
#include "et131x.h"

static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   struct tcb *tcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
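
/* TCB lifecycle in this file: free TCBs sit on the ready queue
 * (tcb_qhead/tcb_qtail).  et131x_send_packet() claims one,
 * nic_send_packet() builds descriptors for it and appends it to the send
 * queue (send_head/send_tail), and on completion
 * et131x_free_send_packet() unmaps its buffers and returns it to the
 * ready queue.
 */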

/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	adapter->tx_ring.tcb_ring =
		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
	if (!adapter->tx_ring.tcb_ring) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
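	/* Note: pci_alloc_consistent() already hands back page-aligned (4k)
	 * memory, so the 4096 - 1 bytes of slack requested below are never
	 * actually needed for alignment; the base address is used exactly
	 * as returned.
	 */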
	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->tx_desc_ring =
	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->tx_desc_ring_pa);
	if (!adapter->tx_ring.tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
						  sizeof(u32),
						  &tx_ring->tx_status_pa);
	if (!tx_ring->tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->tx_ring.tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
								+ 4096 - 1;
		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->tx_ring.tx_desc_ring,
				    adapter->tx_ring.tx_desc_ring_pa);
		adapter->tx_ring.tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->tx_ring.tx_status) {
		pci_free_consistent(adapter->pdev,
				    sizeof(u32),
				    adapter->tx_ring.tx_status,
				    adapter->tx_ring.tx_status_pa);

		adapter->tx_ring.tx_status = NULL;
	}
	/* Free the memory for the tcb structures */
	kfree(adapter->tx_ring.tcb_ring);
}

/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct txdma_regs __iomem *txdma = &etdev->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) etdev->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
	       &txdma->dma_wb_base_hi);
	writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

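	/* tx_status is the single-word status block allocated in
	 * et131x_tx_dma_memory_alloc(); the hardware writes its transmit
	 * completion status back into it through the writeback address
	 * loaded above.
	 */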
	*etdev->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	etdev->tx_ring.send_idx = 0;
}

/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
	       &etdev->regs->txdma.csr);
}

/**
 * et131x_tx_dma_enable - Restart Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &etdev->regs->txdma.csr);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in each TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its associated array make no sense
	 * here
	 */

	/* TCB is not available */
	if (etdev->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
		    !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				etdev->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}

/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	tcb = etdev->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return -ENOMEM;
	}

	etdev->tx_ring.tcb_qhead = tcb->next;

	if (etdev->tx_ring.tcb_qhead == NULL)
		etdev->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	tcb->skb = skb;

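	/* Classify the destination from the start of the Ethernet header so
	 * completion can bump the right statistic: all-ones is broadcast,
	 * while a set group bit (bit 0 of the first octet, tested here via
	 * a little-endian u16 load) marks multicast.
	 */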
	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(etdev, tcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

		if (etdev->tx_ring.tcb_qtail)
			etdev->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			etdev->tx_ring.tcb_qhead = tcb;

		etdev->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return status;
	}
	WARN_ON(etdev->tx_ring.used > NUM_TCB);
	return 0;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: an older version of this function could handle any number
	 * of fragments, but was less efficient; it has since been removed
	 * from this file.
	 */
	if (nr_frags > 23)
		return -EIO;

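	/* Descriptors are staged in the local desc[] array first and only
	 * copied into the shared ring (in one or two memcpy()s, depending
	 * on where the ring wraps) once the whole packet has been mapped.
	 */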
	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data,
						   skb->len -
						   skb->data_len,
						   PCI_DMA_TODEVICE);
			} else {
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data,
						   ((skb->len -
						     skb->data_len) / 2),
						   PCI_DMA_TODEVICE);
				desc[frag].addr_hi = 0;

				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data +
						   ((skb->len -
						     skb->data_len) / 2),
						   ((skb->len -
						     skb->data_len) / 2),
						   PCI_DMA_TODEVICE);
			}
		} else {
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan =
			    frags[i - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a u32.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			desc[frag++].addr_lo =
			    pci_map_page(etdev->pdev,
					 frags[i - 1].page,
					 frags[i - 1].page_offset,
					 frags[i - 1].size,
					 PCI_DMA_TODEVICE);
		}
	}

	if (frag == 0)
		return -EIO;

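	/* Descriptor flag bits: 0x1 marks the last fragment of a packet,
	 * 0x2 the first, and 0x4 requests an interrupt on completion.  On
	 * gigabit links the interrupt bit is only set once every
	 * PARM_TX_NUM_BUFS_DEF packets so that Tx interrupts are coalesced.
	 */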
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			etdev->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else
		desc[frag - 1].flags = 0x5;

	desc[0].flags |= 2;	/* First element flag */

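	/* send_idx and the TCB index fields hold a 10-bit ring offset
	 * (ET_DMA10_MASK) plus a wrap bit (ET_DMA10_WRAP) that toggles on
	 * each revolution of the ring; comparing wrap bits is how the
	 * completion path tells old descriptors from new ones.
	 */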
	tcb->index_start = etdev->tx_ring.send_idx;
	tcb->stale = 0;

	spin_lock_irqsave(&etdev->send_hw_lock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(etdev->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(etdev->tx_ring.tx_desc_ring +
	       INDEX10(etdev->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&etdev->tx_ring.send_idx, thiscopy);

	if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
	    INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
		etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(etdev->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&etdev->tx_ring.send_idx, remainder);
	}

	if (INDEX10(etdev->tx_ring.send_idx) == 0) {
		if (etdev->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = etdev->tx_ring.send_idx - 1;

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->tx_ring.send_tail)
		etdev->tx_ring.send_tail->next = tcb;
	else
		etdev->tx_ring.send_head = tcb;

	etdev->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	etdev->tx_ring.used++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->tx_ring.send_idx,
	       &etdev->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&etdev->send_hw_lock, flags);

	return 0;
}

/**
 * et131x_free_send_packet - Recycle a struct tcb
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->Stats.brdcstxmt);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->Stats.multixmt);
	else
		atomic_inc(&etdev->Stats.unixmt);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
						INDEX10(tcb->index_start));

			pci_unmap_single(etdev->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (etdev->tx_ring.tx_desc_ring +
				INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->Stats.opackets++;

	if (etdev->tx_ring.tcb_qtail)
		etdev->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		etdev->tx_ring.tcb_qhead = tcb;

	etdev->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	WARN_ON(etdev->tx_ring.used < 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		etdev->tx_ring.send_head = next;

		if (next == NULL)
			etdev->tx_ring.send_tail = NULL;

		etdev->tx_ring.used--;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		freed++;
		et131x_free_send_packet(etdev, tcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		tcb = etdev->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->tx_ring.used = 0;
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for send processing
 * @etdev: pointer to our adapter
 *
 * Reclaim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&etdev->regs->txdma.NewServiceComplete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		tcb = etdev->tx_ring.send_head;
	}
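
	/* Same revolution of the ring: complete every TCB whose last
	 * descriptor falls below the hardware's completion index
	 */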
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
	       && index > (tcb->index & ET_DMA10_MASK)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		tcb = etdev->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}