1 /*
2 * File Name:
3 * defxx.c
4 *
5 * Copyright Information:
6 * Copyright Digital Equipment Corporation 1996.
7 *
8 * This software may be used and distributed according to the terms of
9 * the GNU General Public License, incorporated herein by reference.
10 *
11 * Abstract:
12 * A Linux device driver supporting the Digital Equipment Corporation
13 * FDDI EISA and PCI controller families. Supported adapters include:
14 *
15 * DEC FDDIcontroller/EISA (DEFEA)
16 * DEC FDDIcontroller/PCI (DEFPA)
17 *
18 * The original author:
19 * LVS Lawrence V. Stefani <lstefani@yahoo.com>
20 *
21 * Maintainers:
22 * macro Maciej W. Rozycki <macro@linux-mips.org>
23 *
24 * Credits:
25 * I'd like to thank Patricia Cross for helping me get started with
26 * Linux, David Davies for a lot of help upgrading and configuring
27 * my development system and for answering many OS and driver
28 * development questions, and Alan Cox for recommendations and
29 * integration help on getting FDDI support into Linux. LVS
30 *
31 * Driver Architecture:
32 * The driver architecture is largely based on previous driver work
33 * for other operating systems. The upper edge interface and
34 * functions were largely taken from existing Linux device drivers
35 * such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
36 * driver.
37 *
38 * Adapter Probe -
39 * The driver scans for supported EISA adapters by reading the
40 * SLOT ID register for each EISA slot and making a match
41 * against the expected value.
42 *
43 * Bus-Specific Initialization -
44 * This driver currently supports both EISA and PCI controller
45 * families. While the custom DMA chip and FDDI logic are similar
46 * or identical, the bus logic is very different. After
47 * initialization, the only bus-specific difference is in how the
48 * driver enables and disables interrupts. Other than that, the
49 * run-time critical code behaves the same on both families.
50 * It's important to note that both adapter families are configured
51 * to I/O map, rather than memory map, the adapter registers.
52 *
53 * Driver Open/Close -
54 * In the driver open routine, the driver ISR (interrupt service
55 * routine) is registered and the adapter is brought to an
56 * operational state. In the driver close routine, the opposite
57 * occurs; the driver ISR is deregistered and the adapter is
58 * brought to a safe, but closed state. Users may use consecutive
59 * commands to bring the adapter up and down as in the following
60 * example:
61 * ifconfig fddi0 up
62 * ifconfig fddi0 down
63 * ifconfig fddi0 up
64 *
65 * Driver Shutdown -
66 * Apparently, there is no shutdown or halt routine support under
67 * Linux. This routine would be called during "reboot" or
68 * "shutdown" to allow the driver to place the adapter in a safe
69 * state before a warm reboot occurs. To be really safe, the user
70 * should close the adapter before shutdown (eg. ifconfig fddi0 down)
71 * to ensure that the adapter DMA engine is taken off-line. However,
72 * the current driver code anticipates this problem and always issues
73 * a soft reset of the adapter at the beginning of driver initialization.
74 * A future driver enhancement in this area may occur in 2.1.X where
75 * Alan indicated that a shutdown handler may be implemented.
76 *
77 * Interrupt Service Routine -
78 * The driver supports shared interrupts, so the ISR is registered for
79 * each board with the appropriate flag and the pointer to that board's
80 * device structure. This provides the context during interrupt
81 * processing to support shared interrupts and multiple boards.
82 *
83 * Interrupt enabling/disabling can occur at many levels. At the host
84 * end, you can disable system interrupts, or disable interrupts at the
85 * PIC (on Intel systems). Across the bus, both EISA and PCI adapters
86 * have a bus-logic chip interrupt enable/disable as well as a DMA
87 * controller interrupt enable/disable.
88 *
89 * The driver currently enables and disables adapter interrupts at the
90 * bus-logic chip and assumes that Linux will take care of clearing or
91 * acknowledging any host-based interrupt chips.
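 *
 *		As a concrete illustration, the registration done later in
 *		dfx_open() looks roughly like the sketch below; the shared
 *		flag plus the per-board net_device pointer is what makes
 *		interrupt sharing work:
 *
 *			request_irq(dev->irq, dfx_interrupt, SA_SHIRQ,
 *				    dev->name, dev);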
92 *
93 * Control Functions -
94 * Control functions are those used to support functions such as adding
95 * or deleting multicast addresses, enabling or disabling packet
96 * reception filters, or other custom/proprietary commands. Presently,
97 * the driver supports the "get statistics", "set multicast list", and
98 * "set mac address" functions defined by Linux. A list of possible
99 * enhancements include:
100 *
101 * - Custom ioctl interface for executing port interface commands
102 * - Custom ioctl interface for adding unicast addresses to
103 * adapter CAM (to support bridge functions).
104 * - Custom ioctl interface for supporting firmware upgrades.
105 *
106 * Hardware (port interface) Support Routines -
107 * The driver function names that start with "dfx_hw_" represent
108 * low-level port interface routines that are called frequently. They
109 * include issuing a DMA or port control command to the adapter,
110 * resetting the adapter, or reading the adapter state. Since the
111 * driver initialization and run-time code must make calls into the
112 * port interface, these routines were written to be as generic and
113 * usable as possible.
114 *
115 * Receive Path -
116 * The adapter DMA engine supports a 256 entry receive descriptor block
117 * of which up to 255 entries can be used at any given time. The
118 * architecture is a standard producer, consumer, completion model in
119 * which the driver "produces" receive buffers to the adapter, the
120 * adapter "consumes" the receive buffers by DMAing incoming packet data,
121 * and the driver "completes" the receive buffers by servicing the
122 * incoming packet, then "produces" a new buffer and starts the cycle
123 * again. Receive buffers can be fragmented into up to 16 fragments
124 * (descriptor entries). For simplicity, this driver posts
125 * single-fragment receive buffers of 4608 bytes, then allocates a
126 * sk_buff, copies the data, then reposts the buffer. To reduce CPU
127 * utilization, a better approach would be to pass up the receive
128 * buffer (no extra copy) then allocate and post a replacement buffer.
129 * This is a performance enhancement that should be looked into at
130 * some point.
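 *
 *		Sketched out, the copy-and-repost handling described above
 *		amounts to roughly the following (simplified from
 *		dfx_rcv_queue_process(); rcv_buf stands for the posted
 *		receive buffer, error handling and counters omitted):
 *
 *			skb = dev_alloc_skb(pkt_len + 3);
 *			skb_reserve(skb, 3);			  align data portion
 *			memcpy(skb_put(skb, pkt_len), rcv_buf, pkt_len);
 *			skb->protocol = fddi_type_trans(skb, dev);
 *			netif_rx(skb);
 *			(the 4608-byte receive buffer is then reposted unchanged)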
131 *
132 * Transmit Path -
133 * Like the receive path, the adapter DMA engine supports a 256 entry
134 * transmit descriptor block of which up to 255 entries can be used at
135 * any given time. Transmit buffers can be fragmented into up to 255
136 * fragments (descriptor entries). This driver always posts one
137 * fragment per transmit packet request.
138 *
139 * The fragment contains the entire packet from FC to end of data.
140 * Before posting the buffer to the adapter, the driver sets a three-byte
141 * packet request header (PRH) which is required by the Motorola MAC chip
142 * used on the adapters. The PRH tells the MAC the type of token to
143 * receive/send, whether or not to generate and append the CRC, whether
144 * synchronous or asynchronous framing is used, etc. Since the PRH
145 * definition is not necessarily consistent across all FDDI chipsets,
146 * the driver, rather than the common FDDI packet handler routines,
147 * sets these bytes.
148 *
149 * To reduce the number of descriptor fetches needed per transmit request,
150 * the driver takes advantage of the fact that there are at least three
151 * bytes available before the skb->data field on the outgoing transmit
152 * request. This is guaranteed by having fddi_setup() in net_init.c set
153 * dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest
154 * 802.2 SNAP frame header (1 FC byte, 12 bytes of MAC addresses, and 8
155 * bytes of LLC/SNAP); the other 3 "pad" bytes are used to store the PRH.
156 *
157 * There's a subtle advantage to adding these pad bytes to the
158 * hard_header_len: it ensures that the data portion of the packet for
159 * an 802.2 SNAP frame is longword aligned. Other FDDI driver
160 * implementations may not need the extra padding and can start copying
161 * or DMAing directly from the FC byte which starts at skb->data. Should
162 * another driver implementation need ADDITIONAL padding, the net_init.c
163 * module should be updated and dev->hard_header_len should be increased.
164 * NOTE: To maintain the alignment on the data portion of the packet,
165 * dev->hard_header_len should always be evenly divisible by 4 and at
166 * least 24 bytes in size.
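 *
 *		A rough sketch of how the three pad bytes are claimed for the
 *		PRH at transmit time (simplified; the real work, including the
 *		actual PRH values from defxx.h, happens in dfx_xmt_queue_pkt(),
 *		and prh0/prh1/prh2 below are placeholders):
 *
 *			p = skb_push(skb, 3);		reclaim 3 bytes of headroom
 *			p[0] = prh0;			packet request header
 *			p[1] = prh1;
 *			p[2] = prh2;
 *			(the single transmit descriptor then covers PRH through
 *			 end of data)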
167 *
168 * Modification History:
169 * Date Name Description
170 * 16-Aug-96 LVS Created.
171 * 20-Aug-96 LVS Updated dfx_probe so that version information
172 * string is only displayed if 1 or more cards are
173 * found. Changed dfx_rcv_queue_process to copy
174 * 3 NULL bytes before FC to ensure that data is
175 * longword aligned in receive buffer.
176 * 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable
177 * LLC group promiscuous mode if multicast list
178 * is too large. LLC individual/group promiscuous
179 * mode is now disabled if IFF_PROMISC flag not set.
180 * dfx_xmt_queue_pkt no longer checks for NULL skb
181 * on Alan Cox recommendation. Added node address
182 * override support.
183 * 12-Sep-96 LVS Reset current address to factory address during
184 * device open. Updated transmit path to post a
185 * single fragment which includes PRH->end of data.
186 * Mar 2000 AC Did various cleanups for 2.3.x
187 * Jun 2000 jgarzik PCI and resource alloc cleanups
188 * Jul 2000 tjeerd Much cleanup and some bug fixes
189 * Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup
190 * Feb 2001 Skb allocation fixes
191 * Feb 2001 davej PCI enable cleanups.
192 * 04 Aug 2003 macro Converted to the DMA API.
193 * 14 Aug 2004 macro Fix device names reported.
194 */
195
196 /* Include files */
197
198 #include <linux/module.h>
199 #include <linux/kernel.h>
200 #include <linux/string.h>
201 #include <linux/errno.h>
202 #include <linux/ioport.h>
203 #include <linux/slab.h>
204 #include <linux/interrupt.h>
205 #include <linux/pci.h>
206 #include <linux/delay.h>
207 #include <linux/init.h>
208 #include <linux/netdevice.h>
209 #include <linux/fddidevice.h>
210 #include <linux/skbuff.h>
211 #include <linux/bitops.h>
212
213 #include <asm/byteorder.h>
214 #include <asm/io.h>
215
216 #include "defxx.h"
217
218 /* Version information string should be updated prior to each new release! */
219 #define DRV_NAME "defxx"
220 #define DRV_VERSION "v1.07"
221 #define DRV_RELDATE "2004/08/14"
222
223 static char version[] __devinitdata =
224 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
225 " Lawrence V. Stefani and others\n";
226
227 #define DYNAMIC_BUFFERS 1
228
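/*
 * Copybreak threshold: received frames no longer than this are copied into a
 * freshly allocated sk_buff instead of handing up the full-sized receive
 * buffer (see dfx_rcv_queue_process()).
 */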
229 #define SKBUFF_RX_COPYBREAK 200
230 /*
231 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
232 * alignment for compatibility with old EISA boards.
233 */
234 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
235
236 /* Define module-wide (static) routines */
237
238 static void dfx_bus_init(struct net_device *dev);
239 static void dfx_bus_config_check(DFX_board_t *bp);
240
241 static int dfx_driver_init(struct net_device *dev, const char *print_name);
242 static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
243
244 static int dfx_open(struct net_device *dev);
245 static int dfx_close(struct net_device *dev);
246
247 static void dfx_int_pr_halt_id(DFX_board_t *bp);
248 static void dfx_int_type_0_process(DFX_board_t *bp);
249 static void dfx_int_common(struct net_device *dev);
250 static void dfx_interrupt(int irq, void *dev_id, struct pt_regs *regs);
251
252 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
253 static void dfx_ctl_set_multicast_list(struct net_device *dev);
254 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
255 static int dfx_ctl_update_cam(DFX_board_t *bp);
256 static int dfx_ctl_update_filters(DFX_board_t *bp);
257
258 static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
259 static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
260 static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
261 static int dfx_hw_adap_state_rd(DFX_board_t *bp);
262 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
263
264 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
265 static void dfx_rcv_queue_process(DFX_board_t *bp);
266 static void dfx_rcv_flush(DFX_board_t *bp);
267
268 static int dfx_xmt_queue_pkt(struct sk_buff *skb, struct net_device *dev);
269 static int dfx_xmt_done(DFX_board_t *bp);
270 static void dfx_xmt_flush(DFX_board_t *bp);
271
272 /* Define module-wide (static) variables */
273
274 static struct net_device *root_dfx_eisa_dev;
275
276 \f
277 /*
278 * =======================
279 * = dfx_port_write_byte =
280 * = dfx_port_read_byte =
281 * = dfx_port_write_long =
282 * = dfx_port_read_long =
283 * =======================
284 *
285 * Overview:
286 * Routines for reading and writing values from/to adapter
287 *
288 * Returns:
289 * None
290 *
291 * Arguments:
292 * bp - pointer to board information
293 * offset - register offset from base I/O address
294 * data - for dfx_port_write_byte and dfx_port_write_long, this
295 * is a value to write.
296 * for dfx_port_read_byte and dfx_port_read_long, this
297 * is a pointer to store the read value.
298 *
299 * Functional Description:
300 * These routines perform the correct operation to read or write
301 * the adapter register.
302 *
303 * EISA port block base addresses are based on the slot number in which the
304 * controller is installed. For example, if the EISA controller is installed
305 * in slot 4, the port block base address is 0x4000. If the controller is
306 * installed in slot 2, the port block base address is 0x2000, and so on.
307 * This port block can be used to access PDQ, ESIC, and DEFEA on-board
308 * registers using the register offsets defined in DEFXX.H.
309 *
310 * PCI port block base addresses are assigned by the PCI BIOS or system
311 * firmware. There is one 128 byte port block which can be accessed. It
312 * allows for I/O mapping of both PDQ and PFI registers using the register
313 * offsets defined in DEFXX.H.
314 *
315 * Return Codes:
316 * None
317 *
318 * Assumptions:
319 * bp->base_addr is a valid base I/O address for this adapter.
320 * offset is a valid register offset for this adapter.
321 *
322 * Side Effects:
323 * Rather than produce macros for these functions, these routines
324 * are defined using "inline" to ensure that the compiler will
325 * generate inline code and not waste a procedure call and return.
326 * This provides all the benefits of macros, but with the
327 * advantage of strict data type checking.
328 */
329
330 static inline void dfx_port_write_byte(
331 DFX_board_t *bp,
332 int offset,
333 u8 data
334 )
335
336 {
337 u16 port = bp->base_addr + offset;
338
339 outb(data, port);
340 }
341
342 static inline void dfx_port_read_byte(
343 DFX_board_t *bp,
344 int offset,
345 u8 *data
346 )
347
348 {
349 u16 port = bp->base_addr + offset;
350
351 *data = inb(port);
352 }
353
354 static inline void dfx_port_write_long(
355 DFX_board_t *bp,
356 int offset,
357 u32 data
358 )
359
360 {
361 u16 port = bp->base_addr + offset;
362
363 outl(data, port);
364 }
365
366 static inline void dfx_port_read_long(
367 DFX_board_t *bp,
368 int offset,
369 u32 *data
370 )
371
372 {
373 u16 port = bp->base_addr + offset;
374
375 *data = inl(port);
376 }
377
378 \f
379 /*
380 * ============================
381 * = dfx_init_one_pci_or_eisa =
382 * ============================
383 *
384 * Overview:
385 * Initializes a supported FDDI EISA or PCI controller
386 *
387 * Returns:
388 * Condition code
389 *
390 * Arguments:
391 * pdev - pointer to pci device information (NULL for EISA)
392 * ioaddr - pointer to port (NULL for PCI)
393 *
394 * Functional Description:
395 *
396 * Return Codes:
397 * 0 - This device (fddi0, fddi1, etc) configured successfully
398 * -EBUSY - Failed to get resources, or dfx_driver_init failed.
399 *
400 * Assumptions:
401 * It compiles so it should work :-( (PCI cards do :-)
402 *
403 * Side Effects:
404 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
405 * initialized and the board resources are read and stored in
406 * the device structure.
407 */
408 static int __devinit dfx_init_one_pci_or_eisa(struct pci_dev *pdev, long ioaddr)
409 {
410 static int version_disp;
411 char *print_name = DRV_NAME;
412 struct net_device *dev;
413 DFX_board_t *bp; /* board pointer */
414 int alloc_size; /* total buffer size used */
415 int err;
416
417 if (!version_disp) { /* display version info if adapter is found */
418 version_disp = 1; /* set display flag to TRUE so that */
419 printk(version); /* we only display this string ONCE */
420 }
421
422 if (pdev != NULL)
423 print_name = pci_name(pdev);
424
425 dev = alloc_fddidev(sizeof(*bp));
426 if (!dev) {
427 printk(KERN_ERR "%s: unable to allocate fddidev, aborting\n",
428 print_name);
429 return -ENOMEM;
430 }
431
432 /* Enable PCI device. */
433 if (pdev != NULL) {
434 err = pci_enable_device (pdev);
435 if (err) goto err_out;
436 ioaddr = pci_resource_start (pdev, 1);
437 }
438
439 SET_MODULE_OWNER(dev);
440 if (pdev != NULL) SET_NETDEV_DEV(dev, &pdev->dev);
441
442 bp = dev->priv;
443
444 if (!request_region(ioaddr,
445 pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN,
446 print_name)) {
447 printk(KERN_ERR "%s: Cannot reserve I/O resource "
448 "0x%x @ 0x%lx, aborting\n", print_name,
449 pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN, ioaddr);
450 err = -EBUSY;
451 goto err_out;
452 }
453
454 /* Initialize new device structure */
455
456 dev->base_addr = ioaddr; /* save port (I/O) base address */
457
458 dev->get_stats = dfx_ctl_get_stats;
459 dev->open = dfx_open;
460 dev->stop = dfx_close;
461 dev->hard_start_xmit = dfx_xmt_queue_pkt;
462 dev->set_multicast_list = dfx_ctl_set_multicast_list;
463 dev->set_mac_address = dfx_ctl_set_mac_address;
464
465 if (pdev == NULL) {
466 /* EISA board */
467 bp->bus_type = DFX_BUS_TYPE_EISA;
468 bp->next = root_dfx_eisa_dev;
469 root_dfx_eisa_dev = dev;
470 } else {
471 /* PCI board */
472 bp->bus_type = DFX_BUS_TYPE_PCI;
473 bp->pci_dev = pdev;
474 pci_set_drvdata (pdev, dev);
475 pci_set_master (pdev);
476 }
477
478 if (dfx_driver_init(dev, print_name) != DFX_K_SUCCESS) {
479 err = -ENODEV;
480 goto err_out_region;
481 }
482
483 err = register_netdev(dev);
484 if (err)
485 goto err_out_kfree;
486
487 printk("%s: registered as %s\n", print_name, dev->name);
488 return 0;
489
490 err_out_kfree:
491 alloc_size = sizeof(PI_DESCR_BLOCK) +
492 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
493 #ifndef DYNAMIC_BUFFERS
494 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
495 #endif
496 sizeof(PI_CONSUMER_BLOCK) +
497 (PI_ALIGN_K_DESC_BLK - 1);
498 if (bp->kmalloced)
499 pci_free_consistent(pdev, alloc_size,
500 bp->kmalloced, bp->kmalloced_dma);
501 err_out_region:
502 release_region(ioaddr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN);
503 err_out:
504 free_netdev(dev);
505 return err;
506 }
507
508 static int __devinit dfx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
509 {
510 return dfx_init_one_pci_or_eisa(pdev, 0);
511 }
512
513 static int __init dfx_eisa_init(void)
514 {
515 int rc = -ENODEV;
516 int i; /* used in for loops */
517 u16 port; /* temporary I/O (port) address */
518 u32 slot_id; /* EISA hardware (slot) ID read from adapter */
519
520 DBG_printk("In dfx_eisa_init...\n");
521
522 /* Scan for FDDI EISA controllers */
523
524 for (i=0; i < DFX_MAX_EISA_SLOTS; i++) /* only scan for up to 16 EISA slots */
525 {
526 port = (i << 12) + PI_ESIC_K_SLOT_ID; /* port = I/O address for reading slot ID */
527 slot_id = inl(port); /* read EISA HW (slot) ID */
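/* The nibble masked off below is assumed to hold the board revision, so any DEFEA revision matches the product ID */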
528 if ((slot_id & 0xF0FFFFFF) == DEFEA_PRODUCT_ID)
529 {
530 port = (i << 12); /* recalc base addr */
531
532 if (dfx_init_one_pci_or_eisa(NULL, port) == 0) rc = 0;
533 }
534 }
535 return rc;
536 }
537 \f
538 /*
539 * ================
540 * = dfx_bus_init =
541 * ================
542 *
543 * Overview:
544 * Initializes EISA and PCI controller bus-specific logic.
545 *
546 * Returns:
547 * None
548 *
549 * Arguments:
550 * dev - pointer to device information
551 *
552 * Functional Description:
553 * Determine and save adapter IRQ in device table,
554 * then perform bus-specific logic initialization.
555 *
556 * Return Codes:
557 * None
558 *
559 * Assumptions:
560 * dev->base_addr has already been set with the proper
561 * base I/O address for this device.
562 *
563 * Side Effects:
564 * Interrupts are enabled at the adapter bus-specific logic.
565 * Note: Interrupts at the DMA engine (PDQ chip) are not
566 * enabled yet.
567 */
568
569 static void __devinit dfx_bus_init(struct net_device *dev)
570 {
571 DFX_board_t *bp = dev->priv;
572 u8 val; /* used for I/O read/writes */
573
574 DBG_printk("In dfx_bus_init...\n");
575
576 /*
577 * Initialize base I/O address field in bp structure
578 *
579 * Note: bp->base_addr is the same as dev->base_addr.
580 * It's useful because often we'll need to read
581 * or write registers where we already have the
582 * bp pointer instead of the dev pointer. Having
583 * the base address in the bp structure will
584 * save a pointer dereference.
585 *
586 * IMPORTANT!! This field must be defined before
587 * any of the dfx_port_* inline functions are
588 * called.
589 */
590
591 bp->base_addr = dev->base_addr;
592
593 /* And a pointer back to the net_device struct */
594 bp->dev = dev;
595
596 /* Initialize adapter based on bus type */
597
598 if (bp->bus_type == DFX_BUS_TYPE_EISA)
599 {
600 /* Get the interrupt level from the ESIC chip */
601
602 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val);
603 switch ((val & PI_CONFIG_STAT_0_M_IRQ) >> PI_CONFIG_STAT_0_V_IRQ)
604 {
605 case PI_CONFIG_STAT_0_IRQ_K_9:
606 dev->irq = 9;
607 break;
608
609 case PI_CONFIG_STAT_0_IRQ_K_10:
610 dev->irq = 10;
611 break;
612
613 case PI_CONFIG_STAT_0_IRQ_K_11:
614 dev->irq = 11;
615 break;
616
617 case PI_CONFIG_STAT_0_IRQ_K_15:
618 dev->irq = 15;
619 break;
620 }
621
622 /* Enable access to I/O on the board by writing 0x03 to Function Control Register */
623
624 dfx_port_write_byte(bp, PI_ESIC_K_FUNCTION_CNTRL, PI_ESIC_K_FUNCTION_CNTRL_IO_ENB);
625
626 /* Set the I/O decode range of the board */
627
628 val = ((dev->base_addr >> 12) << PI_IO_CMP_V_SLOT);
629 dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_0_1, val);
630 dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_1_1, val);
631
632 /* Enable access to rest of module (including PDQ and packet memory) */
633
634 dfx_port_write_byte(bp, PI_ESIC_K_SLOT_CNTRL, PI_SLOT_CNTRL_M_ENB);
635
636 /*
637 * Map PDQ registers into I/O space. This is done by clearing a bit
638 * in Burst Holdoff register.
639 */
640
641 dfx_port_read_byte(bp, PI_ESIC_K_BURST_HOLDOFF, &val);
642 dfx_port_write_byte(bp, PI_ESIC_K_BURST_HOLDOFF, (val & ~PI_BURST_HOLDOFF_M_MEM_MAP));
643
644 /* Enable interrupts at EISA bus interface chip (ESIC) */
645
646 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val);
647 dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, (val | PI_CONFIG_STAT_0_M_INT_ENB));
648 }
649 else
650 {
651 struct pci_dev *pdev = bp->pci_dev;
652
653 /* Get the interrupt level from the PCI Configuration Table */
654
655 dev->irq = pdev->irq;
656
657 /* Check Latency Timer and set if less than minimal */
658
659 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
660 if (val < PFI_K_LAT_TIMER_MIN) /* if less than min, override with default */
661 {
662 val = PFI_K_LAT_TIMER_DEF;
663 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
664 }
665
666 /* Enable interrupts at PCI bus interface chip (PFI) */
667
668 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, (PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB));
669 }
670 }
671
672 \f
673 /*
674 * ========================
675 * = dfx_bus_config_check =
676 * ========================
677 *
678 * Overview:
679 * Checks the configuration (burst size, full-duplex, etc.). If any parameters
680 * are illegal, this routine sets new defaults.
681 *
682 * Returns:
683 * None
684 *
685 * Arguments:
686 * bp - pointer to board information
687 *
688 * Functional Description:
689 * For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
690 * PDQ, and all FDDI PCI controllers, all values are legal.
691 *
692 * Return Codes:
693 * None
694 *
695 * Assumptions:
696 * dfx_adap_init has NOT been called yet so burst size and other items have
697 * not been set.
698 *
699 * Side Effects:
700 * None
701 */
702
703 static void __devinit dfx_bus_config_check(DFX_board_t *bp)
704 {
705 int status; /* return code from adapter port control call */
706 u32 slot_id; /* EISA-bus hardware id (DEC3001, DEC3002,...) */
707 u32 host_data; /* LW data returned from port control call */
708
709 DBG_printk("In dfx_bus_config_check...\n");
710
711 /* Configuration check only valid for EISA adapter */
712
713 if (bp->bus_type == DFX_BUS_TYPE_EISA)
714 {
715 dfx_port_read_long(bp, PI_ESIC_K_SLOT_ID, &slot_id);
716
717 /*
718 * First check if revision 2 EISA controller. Rev. 1 cards used
719 * PDQ revision B, so no workaround needed in this case. Rev. 3
720 * cards used PDQ revision E, so no workaround needed in this
721 * case, either. Only Rev. 2 cards used either Rev. D or E
722 * chips, so we must verify the chip revision on Rev. 2 cards.
723 */
724
725 if (slot_id == DEFEA_PROD_ID_2)
726 {
727 /*
728 * Revision 2 FDDI EISA controller found, so let's check PDQ
729 * revision of adapter.
730 */
731
732 status = dfx_hw_port_ctrl_req(bp,
733 PI_PCTRL_M_SUB_CMD,
734 PI_SUB_CMD_K_PDQ_REV_GET,
735 0,
736 &host_data);
737 if ((status != DFX_K_SUCCESS) || (host_data == 2))
738 {
739 /*
740 * Either we couldn't determine the PDQ revision, or
741 * we determined that it is at revision D. In either case,
742 * we need to implement the workaround.
743 */
744
745 /* Ensure that the burst size is set to 8 longwords or less */
746
747 switch (bp->burst_size)
748 {
749 case PI_PDATA_B_DMA_BURST_SIZE_32:
750 case PI_PDATA_B_DMA_BURST_SIZE_16:
751 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
752 break;
753
754 default:
755 break;
756 }
757
758 /* Ensure that full-duplex mode is not enabled */
759
760 bp->full_duplex_enb = PI_SNMP_K_FALSE;
761 }
762 }
763 }
764 }
765
766 \f
767 /*
768 * ===================
769 * = dfx_driver_init =
770 * ===================
771 *
772 * Overview:
773 * Initializes remaining adapter board structure information
774 * and makes sure adapter is in a safe state prior to dfx_open().
775 *
776 * Returns:
777 * Condition code
778 *
779 * Arguments:
780 * dev - pointer to device information
781 * print_name - printable device name
782 *
783 * Functional Description:
784 * This function allocates additional resources such as the host memory
785 * blocks needed by the adapter (eg. descriptor and consumer blocks).
786 * Remaining bus initialization steps are also completed. The adapter
787 * is also reset so that it is in the DMA_UNAVAILABLE state. The OS
788 * must call dfx_open() to open the adapter and bring it on-line.
789 *
790 * Return Codes:
791 * DFX_K_SUCCESS - initialization succeeded
792 * DFX_K_FAILURE - initialization failed - could not allocate memory
793 * or read adapter MAC address
794 *
795 * Assumptions:
796 * Memory allocated from pci_alloc_consistent() call is physically
797 * contiguous, locked memory.
798 *
799 * Side Effects:
800 * Adapter is reset and should be in DMA_UNAVAILABLE state before
801 * returning from this routine.
802 */
803
804 static int __devinit dfx_driver_init(struct net_device *dev,
805 const char *print_name)
806 {
807 DFX_board_t *bp = dev->priv;
808 int alloc_size; /* total buffer size needed */
809 char *top_v, *curr_v; /* virtual addrs into memory block */
810 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
811 u32 data; /* host data register value */
812
813 DBG_printk("In dfx_driver_init...\n");
814
815 /* Initialize bus-specific hardware registers */
816
817 dfx_bus_init(dev);
818
819 /*
820 * Initialize default values for configurable parameters
821 *
822 * Note: All of these parameters are ones that a user may
823 * want to customize. It'd be nice to break these
824 * out into Space.c or someplace else that's more
825 * accessible/understandable than this file.
826 */
827
828 bp->full_duplex_enb = PI_SNMP_K_FALSE;
829 bp->req_ttrt = 8 * 12500; /* 8 ms in 80 ns units: 8,000,000 ns / 80 ns = 100,000 */
830 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
831 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
832
833 /*
834 * Ensure that HW configuration is OK
835 *
836 * Note: Depending on the hardware revision, we may need to modify
837 * some of the configurable parameters to workaround hardware
838 * limitations. We'll perform this configuration check AFTER
839 * setting the parameters to their default values.
840 */
841
842 dfx_bus_config_check(bp);
843
844 /* Disable PDQ interrupts first */
845
846 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
847
848 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
849
850 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
851
852 /* Read the factory MAC address from the adapter then save it */
853
854 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
855 &data) != DFX_K_SUCCESS) {
856 printk("%s: Could not read adapter factory MAC address!\n",
857 print_name);
858 return(DFX_K_FAILURE);
859 }
860 memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32));
861
862 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
863 &data) != DFX_K_SUCCESS) {
864 printk("%s: Could not read adapter factory MAC address!\n",
865 print_name);
866 return(DFX_K_FAILURE);
867 }
868 memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16));
869
870 /*
871 * Set current address to factory address
872 *
873 * Note: Node address override support is handled through
874 * dfx_ctl_set_mac_address.
875 */
876
877 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
878 if (bp->bus_type == DFX_BUS_TYPE_EISA)
879 printk("%s: DEFEA at I/O addr = 0x%lX, IRQ = %d, "
880 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
881 print_name, dev->base_addr, dev->irq,
882 dev->dev_addr[0], dev->dev_addr[1],
883 dev->dev_addr[2], dev->dev_addr[3],
884 dev->dev_addr[4], dev->dev_addr[5]);
885 else
886 printk("%s: DEFPA at I/O addr = 0x%lX, IRQ = %d, "
887 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
888 print_name, dev->base_addr, dev->irq,
889 dev->dev_addr[0], dev->dev_addr[1],
890 dev->dev_addr[2], dev->dev_addr[3],
891 dev->dev_addr[4], dev->dev_addr[5]);
892
893 /*
894 * Get memory for descriptor block, consumer block, and other buffers
895 * that need to be DMA read or written to by the adapter.
896 */
897
898 alloc_size = sizeof(PI_DESCR_BLOCK) +
899 PI_CMD_REQ_K_SIZE_MAX +
900 PI_CMD_RSP_K_SIZE_MAX +
901 #ifndef DYNAMIC_BUFFERS
902 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
903 #endif
904 sizeof(PI_CONSUMER_BLOCK) +
905 (PI_ALIGN_K_DESC_BLK - 1);
906 bp->kmalloced = top_v = pci_alloc_consistent(bp->pci_dev, alloc_size,
907 &bp->kmalloced_dma);
908 if (top_v == NULL) {
909 printk("%s: Could not allocate memory for host buffers "
910 "and structures!\n", print_name);
911 return(DFX_K_FAILURE);
912 }
913 memset(top_v, 0, alloc_size); /* zero out memory before continuing */
914 top_p = bp->kmalloced_dma; /* get physical address of buffer */
915
916 /*
917 * To guarantee the 8K alignment required for the descriptor block, 8K - 1
918 * bytes more than needed were allocated. The physical address can now be
919 * rounded up to an 8K boundary. By carving up the memory in a specific order,
920 * we'll guarantee the alignment requirements for all other structures.
921 *
922 * Note: If the assumptions change regarding the non-paged, non-cached,
923 * physically contiguous nature of the memory block or the address
924 * alignments, then we'll need to implement a different algorithm
925 * for allocating the needed memory.
926 */
927
928 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
929 curr_v = top_v + (curr_p - top_p);
930
931 /* Reserve space for descriptor block */
932
933 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
934 bp->descr_block_phys = curr_p;
935 curr_v += sizeof(PI_DESCR_BLOCK);
936 curr_p += sizeof(PI_DESCR_BLOCK);
937
938 /* Reserve space for command request buffer */
939
940 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
941 bp->cmd_req_phys = curr_p;
942 curr_v += PI_CMD_REQ_K_SIZE_MAX;
943 curr_p += PI_CMD_REQ_K_SIZE_MAX;
944
945 /* Reserve space for command response buffer */
946
947 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
948 bp->cmd_rsp_phys = curr_p;
949 curr_v += PI_CMD_RSP_K_SIZE_MAX;
950 curr_p += PI_CMD_RSP_K_SIZE_MAX;
951
952 /* Reserve space for the LLC host receive queue buffers */
953
954 bp->rcv_block_virt = curr_v;
955 bp->rcv_block_phys = curr_p;
956
957 #ifndef DYNAMIC_BUFFERS
958 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
959 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
960 #endif
961
962 /* Reserve space for the consumer block */
963
964 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
965 bp->cons_block_phys = curr_p;
966
967 /* Display virtual and physical addresses if debug driver */
968
969 DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
970 print_name,
971 (long)bp->descr_block_virt, bp->descr_block_phys);
972 DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
973 print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
974 DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
975 print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
976 DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
977 print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
978 DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
979 print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
980
981 return(DFX_K_SUCCESS);
982 }
983
984 \f
985 /*
986 * =================
987 * = dfx_adap_init =
988 * =================
989 *
990 * Overview:
991 * Brings the adapter to the link avail/link unavailable state.
992 *
993 * Returns:
994 * Condition code
995 *
996 * Arguments:
997 * bp - pointer to board information
998 * get_buffers - non-zero if buffers to be allocated
999 *
1000 * Functional Description:
1001 * Issues the low-level firmware/hardware calls necessary to bring
1002 * the adapter up, or to properly reset and restore adapter during
1003 * run-time.
1004 *
1005 * Return Codes:
1006 * DFX_K_SUCCESS - Adapter brought up successfully
1007 * DFX_K_FAILURE - Adapter initialization failed
1008 *
1009 * Assumptions:
1010 * bp->reset_type should be set to a valid reset type value before
1011 * calling this routine.
1012 *
1013 * Side Effects:
1014 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1015 * upon a successful return of this routine.
1016 */
1017
1018 static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1019 {
1020 DBG_printk("In dfx_adap_init...\n");
1021
1022 /* Disable PDQ interrupts first */
1023
1024 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1025
1026 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1027
1028 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1029 {
1030 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1031 return(DFX_K_FAILURE);
1032 }
1033
1034 /*
1035 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1036 * so we'll acknowledge all Type 0 interrupts now before continuing.
1037 */
1038
1039 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1040
1041 /*
1042 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1043 *
1044 * Note: We only need to clear host copies of these registers. The PDQ reset
1045 * takes care of the on-board register values.
1046 */
1047
1048 bp->cmd_req_reg.lword = 0;
1049 bp->cmd_rsp_reg.lword = 0;
1050 bp->rcv_xmt_reg.lword = 0;
1051
1052 /* Clear consumer block before going to DMA_AVAILABLE state */
1053
1054 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1055
1056 /* Initialize the DMA Burst Size */
1057
1058 if (dfx_hw_port_ctrl_req(bp,
1059 PI_PCTRL_M_SUB_CMD,
1060 PI_SUB_CMD_K_BURST_SIZE_SET,
1061 bp->burst_size,
1062 NULL) != DFX_K_SUCCESS)
1063 {
1064 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1065 return(DFX_K_FAILURE);
1066 }
1067
1068 /*
1069 * Set base address of Consumer Block
1070 *
1071 * Assumption: 32-bit physical address of consumer block is 64 byte
1072 * aligned. That is, bits 0-5 of the address must be zero.
1073 */
1074
1075 if (dfx_hw_port_ctrl_req(bp,
1076 PI_PCTRL_M_CONS_BLOCK,
1077 bp->cons_block_phys,
1078 0,
1079 NULL) != DFX_K_SUCCESS)
1080 {
1081 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1082 return(DFX_K_FAILURE);
1083 }
1084
1085 /*
1086 * Set base address of Descriptor Block and bring adapter to DMA_AVAILABLE state
1087 *
1088 * Note: We also set the literal and data swapping requirements in this
1089 * command. Since this driver presently runs on Intel platforms
1090 * which are Little Endian, we'll tell the adapter to byte swap
1091 * data only. This code will need to change when we support
1092 * Big Endian systems (eg. PowerPC).
1093 *
1094 * Assumption: 32-bit physical address of descriptor block is 8Kbyte
1095 * aligned. That is, bits 0-12 of the address must be zero.
1096 */
1097
1098 if (dfx_hw_port_ctrl_req(bp,
1099 PI_PCTRL_M_INIT,
1100 (u32) (bp->descr_block_phys | PI_PDATA_A_INIT_M_BSWAP_DATA),
1101 0,
1102 NULL) != DFX_K_SUCCESS)
1103 {
1104 printk("%s: Could not set descriptor block address!\n", bp->dev->name);
1105 return(DFX_K_FAILURE);
1106 }
1107
1108 /* Set transmit flush timeout value */
1109
1110 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1111 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1112 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */
1113 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1114 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1115 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1116 {
1117 printk("%s: DMA command request failed!\n", bp->dev->name);
1118 return(DFX_K_FAILURE);
1119 }
1120
1121 /* Set the initial values for eFDXEnable and MACTReq MIB objects */
1122
1123 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1124 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1125 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1126 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1127 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1128 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1129 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1130 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1131 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1132 {
1133 printk("%s: DMA command request failed!\n", bp->dev->name);
1134 return(DFX_K_FAILURE);
1135 }
1136
1137 /* Initialize adapter CAM */
1138
1139 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1140 {
1141 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1142 return(DFX_K_FAILURE);
1143 }
1144
1145 /* Initialize adapter filters */
1146
1147 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1148 {
1149 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1150 return(DFX_K_FAILURE);
1151 }
1152
1153 /*
1154 * Remove any existing dynamic buffers (i.e. if the adapter is being
1155 * reinitialized)
1156 */
1157
1158 if (get_buffers)
1159 dfx_rcv_flush(bp);
1160
1161 /* Initialize receive descriptor block and produce buffers */
1162
1163 if (dfx_rcv_init(bp, get_buffers))
1164 {
1165 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1166 if (get_buffers)
1167 dfx_rcv_flush(bp);
1168 return(DFX_K_FAILURE);
1169 }
1170
1171 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1172
1173 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1174 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1175 {
1176 printk("%s: Start command failed\n", bp->dev->name);
1177 if (get_buffers)
1178 dfx_rcv_flush(bp);
1179 return(DFX_K_FAILURE);
1180 }
1181
1182 /* Initialization succeeded, reenable PDQ interrupts */
1183
1184 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1185 return(DFX_K_SUCCESS);
1186 }
1187
1188 \f
1189 /*
1190 * ============
1191 * = dfx_open =
1192 * ============
1193 *
1194 * Overview:
1195 * Opens the adapter
1196 *
1197 * Returns:
1198 * Condition code
1199 *
1200 * Arguments:
1201 * dev - pointer to device information
1202 *
1203 * Functional Description:
1204 * This function brings the adapter to an operational state.
1205 *
1206 * Return Codes:
1207 * 0 - Adapter was successfully opened
1208 * -EAGAIN - Could not register IRQ or adapter initialization failed
1209 *
1210 * Assumptions:
1211 * This routine should only be called for a device that was
1212 * initialized successfully.
1213 *
1214 * Side Effects:
1215 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1216 * if the open is successful.
1217 */
1218
1219 static int dfx_open(struct net_device *dev)
1220 {
1221 int ret;
1222 DFX_board_t *bp = dev->priv;
1223
1224 DBG_printk("In dfx_open...\n");
1225
1226 /* Register IRQ - support shared interrupts by passing device ptr */
1227
1228 ret = request_irq(dev->irq, (void *)dfx_interrupt, SA_SHIRQ, dev->name, dev);
1229 if (ret) {
1230 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1231 return ret;
1232 }
1233
1234 /*
1235 * Set current address to factory MAC address
1236 *
1237 * Note: We've already done this step in dfx_driver_init.
1238 * However, it's possible that a user has set a node
1239 * address override, then closed and reopened the
1240 * adapter. Unless we reset the device address field
1241 * now, we'll continue to use the existing modified
1242 * address.
1243 */
1244
1245 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1246
1247 /* Clear local unicast/multicast address tables and counts */
1248
1249 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1250 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1251 bp->uc_count = 0;
1252 bp->mc_count = 0;
1253
1254 /* Disable promiscuous filter settings */
1255
1256 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1257 bp->group_prom = PI_FSTATE_K_BLOCK;
1258
1259 spin_lock_init(&bp->lock);
1260
1261 /* Reset and initialize adapter */
1262
1263 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */
1264 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1265 {
1266 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1267 free_irq(dev->irq, dev);
1268 return -EAGAIN;
1269 }
1270
1271 /* Set device structure info */
1272 netif_start_queue(dev);
1273 return(0);
1274 }
1275
1276 \f
1277 /*
1278 * =============
1279 * = dfx_close =
1280 * =============
1281 *
1282 * Overview:
1283 * Closes the device/module.
1284 *
1285 * Returns:
1286 * Condition code
1287 *
1288 * Arguments:
1289 * dev - pointer to device information
1290 *
1291 * Functional Description:
1292 * This routine closes the adapter and brings it to a safe state.
1293 * The interrupt service routine is deregistered with the OS.
1294 * The adapter can be opened again with another call to dfx_open().
1295 *
1296 * Return Codes:
1297 * Always returns 0.
1298 *
1299 * Assumptions:
1300 * No further requests for this adapter are made after this routine is
1301 * called. dfx_open() can be called to reset and reinitialize the
1302 * adapter.
1303 *
1304 * Side Effects:
1305 * Adapter should be in DMA_UNAVAILABLE state upon completion of this
1306 * routine.
1307 */
1308
1309 static int dfx_close(struct net_device *dev)
1310 {
1311 DFX_board_t *bp = dev->priv;
1312
1313 DBG_printk("In dfx_close...\n");
1314
1315 /* Disable PDQ interrupts first */
1316
1317 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1318
1319 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1320
1321 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1322
1323 /*
1324 * Flush any pending transmit buffers
1325 *
1326 * Note: It's important that we flush the transmit buffers
1327 * BEFORE we clear our copy of the Type 2 register.
1328 * Otherwise, we'll have no idea how many buffers
1329 * we need to free.
1330 */
1331
1332 dfx_xmt_flush(bp);
1333
1334 /*
1335 * Clear Type 1 and Type 2 registers after adapter reset
1336 *
1337 * Note: Even though we're closing the adapter, it's
1338 * possible that an interrupt will occur after
1339 * dfx_close is called. Without some assurance to
1340 * the contrary we want to make sure that we don't
1341 * process receive and transmit LLC frames and update
1342 * the Type 2 register with bad information.
1343 */
1344
1345 bp->cmd_req_reg.lword = 0;
1346 bp->cmd_rsp_reg.lword = 0;
1347 bp->rcv_xmt_reg.lword = 0;
1348
1349 /* Clear consumer block for the same reason given above */
1350
1351 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1352
1353 /* Release all dynamically allocated skbs in the receive ring. */
1354
1355 dfx_rcv_flush(bp);
1356
1357 /* Clear device structure flags */
1358
1359 netif_stop_queue(dev);
1360
1361 /* Deregister (free) IRQ */
1362
1363 free_irq(dev->irq, dev);
1364
1365 return(0);
1366 }
1367
1368 \f
1369 /*
1370 * ======================
1371 * = dfx_int_pr_halt_id =
1372 * ======================
1373 *
1374 * Overview:
1375 * Displays halt id's in string form.
1376 *
1377 * Returns:
1378 * None
1379 *
1380 * Arguments:
1381 * bp - pointer to board information
1382 *
1383 * Functional Description:
1384 * Determine current halt id and display appropriate string.
1385 *
1386 * Return Codes:
1387 * None
1388 *
1389 * Assumptions:
1390 * None
1391 *
1392 * Side Effects:
1393 * None
1394 */
1395
1396 static void dfx_int_pr_halt_id(DFX_board_t *bp)
1397 {
1398 PI_UINT32 port_status; /* PDQ port status register value */
1399 PI_UINT32 halt_id; /* PDQ port status halt ID */
1400
1401 /* Read the latest port status */
1402
1403 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1404
1405 /* Display halt state transition information */
1406
1407 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1408 switch (halt_id)
1409 {
1410 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1411 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1412 break;
1413
1414 case PI_HALT_ID_K_PARITY_ERROR:
1415 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1416 break;
1417
1418 case PI_HALT_ID_K_HOST_DIR_HALT:
1419 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1420 break;
1421
1422 case PI_HALT_ID_K_SW_FAULT:
1423 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1424 break;
1425
1426 case PI_HALT_ID_K_HW_FAULT:
1427 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1428 break;
1429
1430 case PI_HALT_ID_K_PC_TRACE:
1431 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1432 break;
1433
1434 case PI_HALT_ID_K_DMA_ERROR:
1435 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1436 break;
1437
1438 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1439 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1440 break;
1441
1442 case PI_HALT_ID_K_BUS_EXCEPTION:
1443 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1444 break;
1445
1446 default:
1447 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1448 break;
1449 }
1450 }
1451
1452 \f
1453 /*
1454 * ==========================
1455 * = dfx_int_type_0_process =
1456 * ==========================
1457 *
1458 * Overview:
1459 * Processes Type 0 interrupts.
1460 *
1461 * Returns:
1462 * None
1463 *
1464 * Arguments:
1465 * bp - pointer to board information
1466 *
1467 * Functional Description:
1468 * Processes all enabled Type 0 interrupts. If the reason for the interrupt
1469 * is a serious fault on the adapter, then an error message is displayed
1470 * and the adapter is reset.
1471 *
1472 * One tricky potential timing window is a rapid succession of "link avail"
1473 * and "link unavail" state change interrupts. The acknowledgement of the Type 0
1474 * interrupt must be done before reading the state from the Port Status
1475 * register. This is true because a state change could occur after reading
1476 * the data, but before acknowledging the interrupt. If this state change
1477 * does happen, it would be lost because the driver is using the old state,
1478 * and it will never know about the new state because it subsequently
1479 * acknowledges the state change interrupt.
1480 *
1481 * INCORRECT CORRECT
1482 * read type 0 int reasons read type 0 int reasons
1483 * read adapter state ack type 0 interrupts
1484 * ack type 0 interrupts read adapter state
1485 * ... process interrupt ... ... process interrupt ...
1486 *
1487 * Return Codes:
1488 * None
1489 *
1490 * Assumptions:
1491 * None
1492 *
1493 * Side Effects:
1494 * An adapter reset may occur if the adapter has any Type 0 error interrupts
1495 * or if the port status indicates that the adapter is halted. The driver
1496 * is responsible for reinitializing the adapter with the current CAM
1497 * contents and adapter filter settings.
1498 */
1499
1500 static void dfx_int_type_0_process(DFX_board_t *bp)
1501
1502 {
1503 PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */
1504 PI_UINT32 state; /* current adap state (from port status) */
1505
1506 /*
1507 * Read host interrupt Type 0 register to determine which Type 0
1508 * interrupts are pending. Immediately write it back out to clear
1509 * those interrupts.
1510 */
1511
1512 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1513 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1514
1515 /* Check for Type 0 error interrupts */
1516
1517 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1518 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1519 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1520 {
1521 /* Check for Non-Existent Memory error */
1522
1523 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1524 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1525
1526 /* Check for Packet Memory Parity error */
1527
1528 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1529 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1530
1531 /* Check for Host Bus Parity error */
1532
1533 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1534 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1535
1536 /* Reset adapter and bring it back on-line */
1537
1538 bp->link_available = PI_K_FALSE; /* link is no longer available */
1539 bp->reset_type = 0; /* rerun on-board diagnostics */
1540 printk("%s: Resetting adapter...\n", bp->dev->name);
1541 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1542 {
1543 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1544 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1545 return;
1546 }
1547 printk("%s: Adapter reset successful!\n", bp->dev->name);
1548 return;
1549 }
1550
1551 /* Check for transmit flush interrupt */
1552
1553 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1554 {
1555 /* Flush any pending xmt's and acknowledge the flush interrupt */
1556
1557 bp->link_available = PI_K_FALSE; /* link is no longer available */
1558 dfx_xmt_flush(bp); /* flush any outstanding packets */
1559 (void) dfx_hw_port_ctrl_req(bp,
1560 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1561 0,
1562 0,
1563 NULL);
1564 }
1565
1566 /* Check for adapter state change */
1567
1568 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1569 {
1570 /* Get latest adapter state */
1571
1572 state = dfx_hw_adap_state_rd(bp); /* get adapter state */
1573 if (state == PI_STATE_K_HALTED)
1574 {
1575 /*
1576 * Adapter has transitioned to HALTED state, try to reset
1577 * adapter to bring it back on-line. If reset fails,
1578 * leave the adapter in the broken state.
1579 */
1580
1581 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1582 dfx_int_pr_halt_id(bp); /* display halt id as string */
1583
1584 /* Reset adapter and bring it back on-line */
1585
1586 bp->link_available = PI_K_FALSE; /* link is no longer available */
1587 bp->reset_type = 0; /* rerun on-board diagnostics */
1588 printk("%s: Resetting adapter...\n", bp->dev->name);
1589 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1590 {
1591 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1592 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1593 return;
1594 }
1595 printk("%s: Adapter reset successful!\n", bp->dev->name);
1596 }
1597 else if (state == PI_STATE_K_LINK_AVAIL)
1598 {
1599 bp->link_available = PI_K_TRUE; /* set link available flag */
1600 }
1601 }
1602 }
1603
1604 \f
1605 /*
1606 * ==================
1607 * = dfx_int_common =
1608 * ==================
1609 *
1610 * Overview:
1611 * Interrupt service routine (ISR)
1612 *
1613 * Returns:
1614 * None
1615 *
1616 * Arguments:
1617 * bp - pointer to board information
1618 *
1619 * Functional Description:
1620 * This is the ISR which processes incoming adapter interrupts.
1621 *
1622 * Return Codes:
1623 * None
1624 *
1625 * Assumptions:
1626 * This routine assumes PDQ interrupts have not been disabled.
1627 * When interrupts are disabled at the PDQ, the Port Status register
1628 * is automatically cleared. This routine uses the Port Status
1629 * register value to determine whether a Type 0 interrupt occurred,
1630 * so it's important that adapter interrupts are not normally
1631 * enabled/disabled at the PDQ.
1632 *
1633 * It's vital that this routine is NOT reentered for the
1634 * same board and that the OS is not in another section of
1635 * code (eg. dfx_xmt_queue_pkt) for the same board on a
1636 * different thread.
1637 *
1638 * Side Effects:
1639 * Pending interrupts are serviced. Depending on the type of
1640 * interrupt, acknowledging and clearing the interrupt at the
1641 * PDQ involves writing a register to clear the interrupt bit
1642 * or updating completion indices.
1643 */
1644
1645 static void dfx_int_common(struct net_device *dev)
1646 {
1647 DFX_board_t *bp = dev->priv;
1648 PI_UINT32 port_status; /* Port Status register */
1649
1650 /* Process xmt interrupts - frequent case, so always call this routine */
1651
1652 if(dfx_xmt_done(bp)) /* free consumed xmt packets */
1653 netif_wake_queue(dev);
1654
1655 /* Process rcv interrupts - frequent case, so always call this routine */
1656
1657 dfx_rcv_queue_process(bp); /* service received LLC frames */
1658
1659 /*
1660 * Transmit and receive producer and completion indices are updated on the
1661 * adapter by writing to the Type 2 Producer register. Since the frequent
1662 * case is that we'll be processing either LLC transmit or receive buffers,
1663 * we'll optimize I/O writes by doing a single register write here.
1664 */
1665
1666 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1667
1668 /* Read PDQ Port Status register to find out which interrupts need processing */
1669
1670 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1671
1672 /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1673
1674 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1675 dfx_int_type_0_process(bp); /* process Type 0 interrupts */
1676 }
1677
1678 \f
1679 /*
1680 * =================
1681 * = dfx_interrupt =
1682 * =================
1683 *
1684 * Overview:
1685 * Interrupt processing routine
1686 *
1687 * Returns:
1688 * None
1689 *
1690 * Arguments:
1691 * irq - interrupt vector
1692 * dev_id - pointer to device information
1693 * regs - pointer to registers structure
1694 *
1695 * Functional Description:
1696 * This routine calls the interrupt processing routine for this adapter. It
1697 * disables and reenables adapter interrupts, as appropriate. We can support
1698 * shared interrupts since the incoming dev_id pointer provides our device
1699 * structure context.
1700 *
1701 * Return Codes:
1702 * None
1703 *
1704 * Assumptions:
1705 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1706 * on Intel-based systems) is done by the operating system outside this
1707 * routine.
1708 *
1709 * System interrupts are enabled through this call.
1710 *
1711 * Side Effects:
1712 * Interrupts are disabled, then reenabled at the adapter.
1713 */
1714
1715 static void dfx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1716 {
1717 struct net_device *dev = dev_id;
1718 DFX_board_t *bp; /* private board structure pointer */
1719 u8 tmp; /* used for disabling/enabling ints */
1720
1721 	/* Get board pointer from the device structure */
1722
1723 bp = dev->priv;
1724
1725 spin_lock(&bp->lock);
1726
1727 	/* The spin lock taken above serializes this ISR against other paths (eg. dfx_xmt_queue_pkt) that touch this board */
1728
1729 /* Service adapter interrupts */
1730
1731 if (bp->bus_type == DFX_BUS_TYPE_PCI)
1732 {
1733 /* Disable PDQ-PFI interrupts at PFI */
1734
1735 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, PFI_MODE_M_DMA_ENB);
1736
1737 /* Call interrupt service routine for this adapter */
1738
1739 dfx_int_common(dev);
1740
1741 /* Clear PDQ interrupt status bit and reenable interrupts */
1742
1743 dfx_port_write_long(bp, PFI_K_REG_STATUS, PFI_STATUS_M_PDQ_INT);
1744 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1745 (PFI_MODE_M_PDQ_INT_ENB + PFI_MODE_M_DMA_ENB));
1746 }
1747 else
1748 {
1749 /* Disable interrupts at the ESIC */
1750
1751 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &tmp);
1752 tmp &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1753 dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, tmp);
1754
1755 /* Call interrupt service routine for this adapter */
1756
1757 dfx_int_common(dev);
1758
1759 /* Reenable interrupts at the ESIC */
1760
1761 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &tmp);
1762 tmp |= PI_CONFIG_STAT_0_M_INT_ENB;
1763 dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, tmp);
1764 }
1765
1766 spin_unlock(&bp->lock);
1767 }
1768
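/*
 * A minimal sketch of the locking discipline assumed by dfx_int_common()
 * ("NOT reentered for the same board"): this ISR takes the plain
 * spin_lock() form because it already runs in interrupt context, while
 * process-context paths such as dfx_xmt_queue_pkt() use the _irqsave
 * variant so this handler cannot run against them on the same CPU.
 * Illustration only.
 */
#if 0
	/* interrupt context (as in dfx_interrupt above): */
	spin_lock(&bp->lock);
	/* ... touch shared ring indices ... */
	spin_unlock(&bp->lock);

	/* process context (as in dfx_xmt_queue_pkt below): */
	{
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		/* ... touch the same ring indices with local interrupts off ... */
		spin_unlock_irqrestore(&bp->lock, flags);
	}
#endif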
1769 \f
1770 /*
1771 * =====================
1772 * = dfx_ctl_get_stats =
1773 * =====================
1774 *
1775 * Overview:
1776 * Get statistics for FDDI adapter
1777 *
1778 * Returns:
1779 * Pointer to FDDI statistics structure
1780 *
1781 * Arguments:
1782 * dev - pointer to device information
1783 *
1784 * Functional Description:
1785 * Gets current MIB objects from adapter, then
1786 * returns FDDI statistics structure as defined
1787 * in if_fddi.h.
1788 *
1789 * Note: Since the FDDI statistics structure is
1790 * still new and the device structure doesn't
1791 * have an FDDI-specific get statistics handler,
1792 * we'll return the FDDI statistics structure as
1793 * a pointer to an Ethernet statistics structure.
1794 * That way, at least the first part of the statistics
1795 * structure can be decoded properly, and it allows
1796 * "smart" applications to perform a second cast to
1797 * decode the FDDI-specific statistics.
1798 *
1799 * We'll have to pay attention to this routine as the
1800 * device structure becomes more mature and LAN media
1801 * independent.
1802 *
1803 * Return Codes:
1804 * None
1805 *
1806 * Assumptions:
1807 * None
1808 *
1809 * Side Effects:
1810 * None
1811 */
1812
1813 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
1814 {
1815 DFX_board_t *bp = dev->priv;
1816
1817 /* Fill the bp->stats structure with driver-maintained counters */
1818
1819 bp->stats.gen.rx_packets = bp->rcv_total_frames;
1820 bp->stats.gen.tx_packets = bp->xmt_total_frames;
1821 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
1822 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
1823 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
1824 bp->rcv_frame_status_errors +
1825 bp->rcv_length_errors;
1826 bp->stats.gen.tx_errors = bp->xmt_length_errors;
1827 bp->stats.gen.rx_dropped = bp->rcv_discards;
1828 bp->stats.gen.tx_dropped = bp->xmt_discards;
1829 bp->stats.gen.multicast = bp->rcv_multicast_frames;
1830 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */
1831
1832 /* Get FDDI SMT MIB objects */
1833
1834 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
1835 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1836 return((struct net_device_stats *) &bp->stats);
1837
1838 /* Fill the bp->stats structure with the SMT MIB object values */
1839
1840 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
1841 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
1842 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
1843 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
1844 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
1845 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
1846 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
1847 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
1848 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
1849 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
1850 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
1851 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
1852 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
1853 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
1854 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
1855 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
1856 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
1857 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
1858 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
1859 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
1860 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
1861 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
1862 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
1863 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
1864 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
1865 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
1866 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
1867 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
1868 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
1869 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
1870 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
1871 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
1872 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
1873 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
1874 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
1875 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
1876 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
1877 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
1878 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
1879 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
1880 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
1881 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
1882 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
1883 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
1884 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
1885 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
1886 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
1887 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
1888 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
1889 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
1890 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
1891 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
1892 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
1893 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
1894 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
1895 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
1896 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
1897 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
1898 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
1899 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
1900 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
1901 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
1902 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
1903 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
1904 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
1905 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
1906 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
1907 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
1908 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
1909 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
1910 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
1911 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
1912 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
1913 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
1914 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
1915 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
1916 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
1917 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
1918 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
1919 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
1920 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
1921 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
1922 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
1923 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
1924 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
1925 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
1926 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
1927 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
1928 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
1929 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
1930 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
1931 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
1932
1933 /* Get FDDI counters */
1934
1935 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
1936 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1937 return((struct net_device_stats *) &bp->stats);
1938
1939 /* Fill the bp->stats structure with the FDDI counter values */
1940
1941 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
1942 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
1943 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
1944 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
1945 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
1946 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
1947 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
1948 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
1949 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
1950 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
1951 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
1952
1953 return((struct net_device_stats *) &bp->stats);
1954 }
1955
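/*
 * Hedged example of the "second cast" mentioned in the header comment
 * above.  The cast is sound because the generic counters ("gen") are the
 * first member of struct fddi_statistics (<linux/if_fddi.h>), which is
 * what this routine actually fills in and returns.  The caller shown is
 * hypothetical.
 */
#if 0
	struct net_device_stats *ns = dfx_ctl_get_stats(dev);
	struct fddi_statistics *fs = (struct fddi_statistics *)ns;

	printk(KERN_DEBUG "%s: %lu packets received, SMT ECM state %u\n",
	       dev->name, ns->rx_packets, fs->smt_ecm_state);
#endif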
1956 \f
1957 /*
1958 * ==============================
1959 * = dfx_ctl_set_multicast_list =
1960 * ==============================
1961 *
1962 * Overview:
1963 * Enable/Disable LLC frame promiscuous mode reception
1964 * on the adapter and/or update multicast address table.
1965 *
1966 * Returns:
1967 * None
1968 *
1969 * Arguments:
1970 * dev - pointer to device information
1971 *
1972 * Functional Description:
1973 * This routine follows a fairly simple algorithm for setting the
1974 * adapter filters and CAM:
1975 *
1976 * if IFF_PROMISC flag is set
1977 * enable LLC individual/group promiscuous mode
1978 * else
1979 * disable LLC individual/group promiscuous mode
1980 * if number of incoming multicast addresses >
1981 * (CAM max size - number of unicast addresses in CAM)
1982 * enable LLC group promiscuous mode
1983 * set driver-maintained multicast address count to zero
1984 * else
1985 * disable LLC group promiscuous mode
1986 * set driver-maintained multicast address count to incoming count
1987 * update adapter CAM
1988 * update adapter filters
1989 *
1990 * Return Codes:
1991 * None
1992 *
1993 * Assumptions:
1994 * Multicast addresses are presented in canonical (LSB) format.
1995 *
1996 * Side Effects:
1997 * On-board adapter CAM and filters are updated.
1998 */
1999
2000 static void dfx_ctl_set_multicast_list(struct net_device *dev)
2001 {
2002 DFX_board_t *bp = dev->priv;
2003 int i; /* used as index in for loop */
2004 struct dev_mc_list *dmi; /* ptr to multicast addr entry */
2005
2006 /* Enable LLC frame promiscuous mode, if necessary */
2007
2008 if (dev->flags & IFF_PROMISC)
2009 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */
2010
2011 /* Else, update multicast address table */
2012
2013 else
2014 {
2015 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */
2016 /*
2017 * Check whether incoming multicast address count exceeds table size
2018 *
2019 * Note: The adapters utilize an on-board 64 entry CAM for
2020 * supporting perfect filtering of multicast packets
2021 * and bridge functions when adding unicast addresses.
2022 * There is no hash function available. To support
2023 * additional multicast addresses, the all multicast
2024 * filter (LLC group promiscuous mode) must be enabled.
2025 *
2026 * The firmware reserves two CAM entries for SMT-related
2027 * multicast addresses, which leaves 62 entries available.
2028 * The following code ensures that we're not being asked
2029 * to add more than 62 addresses to the CAM. If we are,
2030 * the driver will enable the all multicast filter.
2031 * Should the number of multicast addresses drop below
2032 * the high water mark, the filter will be disabled and
2033 * perfect filtering will be used.
2034 */
2035
2036 if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2037 {
2038 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2039 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2040 }
2041 else
2042 {
2043 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
2044 bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */
2045 }
2046
2047 /* Copy addresses to multicast address table, then update adapter CAM */
2048
2049 dmi = dev->mc_list; /* point to first multicast addr */
2050 for (i=0; i < bp->mc_count; i++)
2051 {
2052 memcpy(&bp->mc_table[i*FDDI_K_ALEN], dmi->dmi_addr, FDDI_K_ALEN);
2053 dmi = dmi->next; /* point to next multicast addr */
2054 }
2055 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2056 {
2057 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2058 }
2059 else
2060 {
2061 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2062 }
2063 }
2064
2065 /* Update adapter filters */
2066
2067 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2068 {
2069 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2070 }
2071 else
2072 {
2073 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2074 }
2075 }
2076
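/*
 * Worked example of the capacity check above, illustration only: the CAM
 * budget is shared with the unicast override installed by
 * dfx_ctl_set_mac_address(), so with uc_count == 1 up to
 * PI_CMD_ADDR_FILTER_K_SIZE - 1 multicast addresses can be perfectly
 * filtered; one more than that and the driver falls back to the
 * all-multicast (LLC group promiscuous) filter instead of the CAM.
 */
#if 0
	if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - 1))	/* uc_count == 1 */
		;	/* group_prom = PASS, mc_count forced to 0       */
	else
		;	/* group_prom = BLOCK, every address goes in CAM */
#endif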
2077 \f
2078 /*
2079 * ===========================
2080 * = dfx_ctl_set_mac_address =
2081 * ===========================
2082 *
2083 * Overview:
2084 * Add node address override (unicast address) to adapter
2085 * CAM and update dev_addr field in device table.
2086 *
2087 * Returns:
2088 * Integer status code (always zero)
2089 *
2090 * Arguments:
2091 * dev - pointer to device information
2092 * addr - pointer to sockaddr structure containing unicast address to add
2093 *
2094 * Functional Description:
2095 * The adapter supports node address overrides by adding one or more
2096 * unicast addresses to the adapter CAM. This is similar to adding
2097 * multicast addresses. In this routine we'll update the driver and
2098 * device structures with the new address, then update the adapter CAM
2099 * to ensure that the adapter will copy and strip frames destined to and
2100 * sourced from that address.
2101 *
2102 * Return Codes:
2103 * Always returns zero.
2104 *
2105 * Assumptions:
2106 * The address pointed to by addr->sa_data is a valid unicast
2107 * address and is presented in canonical (LSB) format.
2108 *
2109 * Side Effects:
2110 * On-board adapter CAM is updated. On-board adapter filters
2111 * may be updated.
2112 */
2113
2114 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2115 {
2116 DFX_board_t *bp = dev->priv;
2117 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2118
2119 /* Copy unicast address to driver-maintained structs and update count */
2120
2121 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
2122 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
2123 bp->uc_count = 1;
2124
2125 /*
2126 * Verify we're not exceeding the CAM size by adding unicast address
2127 *
2128 * Note: It's possible that before entering this routine we've
2129 * already filled the CAM with 62 multicast addresses.
2130 * Since we need to place the node address override into
2131 * the CAM, we have to check to see that we're not
2132 * exceeding the CAM size. If we are, we have to enable
2133 * the LLC group (multicast) promiscuous mode filter as
2134 * in dfx_ctl_set_multicast_list.
2135 */
2136
2137 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2138 {
2139 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2140 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2141
2142 /* Update adapter filters */
2143
2144 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2145 {
2146 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2147 }
2148 else
2149 {
2150 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2151 }
2152 }
2153
2154 /* Update adapter CAM with new unicast address */
2155
2156 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2157 {
2158 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2159 }
2160 else
2161 {
2162 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2163 }
2164 return(0); /* always return zero */
2165 }
2166
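/*
 * Illustrative caller, not part of the driver: the networking core reaches
 * this routine through dev->set_mac_address, handing it a struct sockaddr
 * whose sa_data carries the FDDI_K_ALEN-byte override.  "new_node_address"
 * below is a hypothetical buffer.
 */
#if 0
	struct sockaddr sa;

	sa.sa_family = ARPHRD_FDDI;
	memcpy(sa.sa_data, new_node_address, FDDI_K_ALEN);
	dev->set_mac_address(dev, &sa);		/* ends up in dfx_ctl_set_mac_address() */
#endif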
2167 \f
2168 /*
2169 * ======================
2170 * = dfx_ctl_update_cam =
2171 * ======================
2172 *
2173 * Overview:
2174 * Procedure to update adapter CAM (Content Addressable Memory)
2175 * with desired unicast and multicast address entries.
2176 *
2177 * Returns:
2178 * Condition code
2179 *
2180 * Arguments:
2181 * bp - pointer to board information
2182 *
2183 * Functional Description:
2184 * Updates adapter CAM with current contents of board structure
2185 * unicast and multicast address tables. Since there are only 62
2186 * free entries in CAM, this routine ensures that the command
2187 * request buffer is not overrun.
2188 *
2189 * Return Codes:
2190 * DFX_K_SUCCESS - Request succeeded
2191 * DFX_K_FAILURE - Request failed
2192 *
2193 * Assumptions:
2194 * All addresses being added (unicast and multicast) are in canonical
2195 * order.
2196 *
2197 * Side Effects:
2198 * On-board adapter CAM is updated.
2199 */
2200
2201 static int dfx_ctl_update_cam(DFX_board_t *bp)
2202 {
2203 int i; /* used as index */
2204 PI_LAN_ADDR *p_addr; /* pointer to CAM entry */
2205
2206 /*
2207 * Fill in command request information
2208 *
2209 * Note: Even though both the unicast and multicast address
2210 * table entries are stored as contiguous 6 byte entries,
2211 * the firmware address filter set command expects each
2212 * entry to be two longwords (8 bytes total). We must be
2213 * careful to only copy the six bytes of each unicast and
2214 * multicast table entry into each command entry. This
2215 * is also why we must first clear the entire command
2216 * request buffer.
2217 */
2218
2219 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */
2220 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2221 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2222
2223 /* Now add unicast addresses to command request buffer, if any */
2224
2225 for (i=0; i < (int)bp->uc_count; i++)
2226 {
2227 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2228 {
2229 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2230 p_addr++; /* point to next command entry */
2231 }
2232 }
2233
2234 /* Now add multicast addresses to command request buffer, if any */
2235
2236 for (i=0; i < (int)bp->mc_count; i++)
2237 {
2238 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2239 {
2240 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2241 p_addr++; /* point to next command entry */
2242 }
2243 }
2244
2245 /* Issue command to update adapter CAM, then return */
2246
2247 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2248 return(DFX_K_FAILURE);
2249 return(DFX_K_SUCCESS);
2250 }
2251
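/*
 * Layout note plus an equivalent, more explicit form of the copy loops
 * above, for illustration only.  Each command entry is two longwords
 * (8 bytes), but only the first FDDI_K_ALEN (6) bytes carry the address;
 * the memset() at the top of dfx_ctl_update_cam() guarantees the two pad
 * bytes of every entry are already zero:
 *
 *	entry[n]:  a0 a1 a2 a3 | a4 a5 00 00
 */
#if 0
	memcpy(p_addr, &bp->uc_table[i * FDDI_K_ALEN], FDDI_K_ALEN);
	memset((char *)p_addr + FDDI_K_ALEN, 0,
	       sizeof(*p_addr) - FDDI_K_ALEN);	/* redundant here: already zeroed */
	p_addr++;
#endif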
2252 \f
2253 /*
2254 * ==========================
2255 * = dfx_ctl_update_filters =
2256 * ==========================
2257 *
2258 * Overview:
2259 * Procedure to update adapter filters with desired
2260 * filter settings.
2261 *
2262 * Returns:
2263 * Condition code
2264 *
2265 * Arguments:
2266 * bp - pointer to board information
2267 *
2268 * Functional Description:
2269 * Enables or disables filter using current filter settings.
2270 *
2271 * Return Codes:
2272 * DFX_K_SUCCESS - Request succeeded.
2273 * DFX_K_FAILURE - Request failed.
2274 *
2275 * Assumptions:
2276 * We must always pass up packets destined to the broadcast
2277 * address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2278 * broadcast filter enabled.
2279 *
2280 * Side Effects:
2281 * On-board adapter filters are updated.
2282 */
2283
2284 static int dfx_ctl_update_filters(DFX_board_t *bp)
2285 {
2286 int i = 0; /* used as index */
2287
2288 /* Fill in command request information */
2289
2290 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2291
2292 /* Initialize Broadcast filter - * ALWAYS ENABLED * */
2293
2294 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2295 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2296
2297 /* Initialize LLC Individual/Group Promiscuous filter */
2298
2299 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2300 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2301
2302 /* Initialize LLC Group Promiscuous filter */
2303
2304 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2305 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2306
2307 /* Terminate the item code list */
2308
2309 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2310
2311 /* Issue command to update adapter filters, then return */
2312
2313 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2314 return(DFX_K_FAILURE);
2315 return(DFX_K_SUCCESS);
2316 }
2317
2318 \f
2319 /*
2320 * ======================
2321 * = dfx_hw_dma_cmd_req =
2322 * ======================
2323 *
2324 * Overview:
2325 * Sends PDQ DMA command to adapter firmware
2326 *
2327 * Returns:
2328 * Condition code
2329 *
2330 * Arguments:
2331 * bp - pointer to board information
2332 *
2333 * Functional Description:
2334 * The command request and response buffers are posted to the adapter in the manner
2335 * described in the PDQ Port Specification:
2336 *
2337 * 1. Command Response Buffer is posted to adapter.
2338 * 2. Command Request Buffer is posted to adapter.
2339 * 3. Command Request consumer index is polled until it indicates that request
2340 * buffer has been DMA'd to adapter.
2341 * 4. Command Response consumer index is polled until it indicates that response
2342 * buffer has been DMA'd from adapter.
2343 *
2344 * This ordering ensures that a response buffer is already available for the firmware
2345 * to use once it's done processing the request buffer.
2346 *
2347 * Return Codes:
2348 * DFX_K_SUCCESS - DMA command succeeded
2349 * DFX_K_OUTSTATE - Adapter is NOT in proper state
2350 * DFX_K_HW_TIMEOUT - DMA command timed out
2351 *
2352 * Assumptions:
2353 * Command request buffer has already been filled with desired DMA command.
2354 *
2355 * Side Effects:
2356 * None
2357 */
2358
2359 static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2360 {
2361 int status; /* adapter status */
2362 int timeout_cnt; /* used in for loops */
2363
2364 /* Make sure the adapter is in a state that we can issue the DMA command in */
2365
2366 status = dfx_hw_adap_state_rd(bp);
2367 if ((status == PI_STATE_K_RESET) ||
2368 (status == PI_STATE_K_HALTED) ||
2369 (status == PI_STATE_K_DMA_UNAVAIL) ||
2370 (status == PI_STATE_K_UPGRADE))
2371 return(DFX_K_OUTSTATE);
2372
2373 /* Put response buffer on the command response queue */
2374
2375 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2376 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2377 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2378
2379 /* Bump (and wrap) the producer index and write out to register */
2380
2381 bp->cmd_rsp_reg.index.prod += 1;
2382 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2383 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2384
2385 /* Put request buffer on the command request queue */
2386
2387 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2388 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2389 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2390
2391 /* Bump (and wrap) the producer index and write out to register */
2392
2393 bp->cmd_req_reg.index.prod += 1;
2394 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2395 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2396
2397 /*
2398 * Here we wait for the command request consumer index to be equal
2399 * to the producer, indicating that the adapter has DMAed the request.
2400 */
2401
2402 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2403 {
2404 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2405 break;
2406 udelay(100); /* wait for 100 microseconds */
2407 }
2408 if (timeout_cnt == 0)
2409 return(DFX_K_HW_TIMEOUT);
2410
2411 /* Bump (and wrap) the completion index and write out to register */
2412
2413 bp->cmd_req_reg.index.comp += 1;
2414 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2415 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2416
2417 /*
2418 * Here we wait for the command response consumer index to be equal
2419 * to the producer, indicating that the adapter has DMAed the response.
2420 */
2421
2422 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2423 {
2424 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2425 break;
2426 udelay(100); /* wait for 100 microseconds */
2427 }
2428 if (timeout_cnt == 0)
2429 return(DFX_K_HW_TIMEOUT);
2430
2431 /* Bump (and wrap) the completion index and write out to register */
2432
2433 bp->cmd_rsp_reg.index.comp += 1;
2434 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2435 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2436 return(DFX_K_SUCCESS);
2437 }
2438
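/*
 * Worked example, illustration only: the "add one, then AND with
 * NUM_ENTRIES - 1" steps above wrap the ring indices cheaply because the
 * queue sizes are powers of two.  With a 16-entry ring, for instance,
 * (15 + 1) & 15 == 0, so the index rolls back to the first slot.  The
 * poll loops above bound their wait the same simple way:
 * 20000 iterations x 100 us of udelay() per iteration = 2 seconds.
 */
#if 0
	u8 index = 15;				/* last slot of a 16-entry ring */

	index = (index + 1) & (16 - 1);		/* wraps to 0 */
#endif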
2439 \f
2440 /*
2441 * ========================
2442 * = dfx_hw_port_ctrl_req =
2443 * ========================
2444 *
2445 * Overview:
2446 * Sends PDQ port control command to adapter firmware
2447 *
2448 * Returns:
2449 * Condition code; the Host Data register value is returned through host_data if that pointer is not NULL
2450 *
2451 * Arguments:
2452 * bp - pointer to board information
2453 * command - port control command
2454 * data_a - port data A register value
2455 * data_b - port data B register value
2456 * host_data - ptr to host data register value
2457 *
2458 * Functional Description:
2459 * Send generic port control command to adapter by writing
2460 * to various PDQ port registers, then polling for completion.
2461 *
2462 * Return Codes:
2463 * DFX_K_SUCCESS - port control command succeeded
2464 * DFX_K_HW_TIMEOUT - port control command timed out
2465 *
2466 * Assumptions:
2467 * None
2468 *
2469 * Side Effects:
2470 * None
2471 */
2472
2473 static int dfx_hw_port_ctrl_req(
2474 DFX_board_t *bp,
2475 PI_UINT32 command,
2476 PI_UINT32 data_a,
2477 PI_UINT32 data_b,
2478 PI_UINT32 *host_data
2479 )
2480
2481 {
2482 PI_UINT32 port_cmd; /* Port Control command register value */
2483 int timeout_cnt; /* used in for loops */
2484
2485 /* Set Command Error bit in command longword */
2486
2487 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2488
2489 /* Issue port command to the adapter */
2490
2491 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2492 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2493 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2494
2495 /* Now wait for command to complete */
2496
2497 if (command == PI_PCTRL_M_BLAST_FLASH)
2498 timeout_cnt = 600000; /* set command timeout count to 60 seconds */
2499 else
2500 timeout_cnt = 20000; /* set command timeout count to 2 seconds */
2501
2502 for (; timeout_cnt > 0; timeout_cnt--)
2503 {
2504 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2505 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2506 break;
2507 udelay(100); /* wait for 100 microseconds */
2508 }
2509 if (timeout_cnt == 0)
2510 return(DFX_K_HW_TIMEOUT);
2511
2512 /*
2513 * If the caller supplied a non-NULL host_data pointer, return the
2514 * contents of the HOST_DATA register through it.
2516 */
2517
2518 if (host_data != NULL)
2519 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2520 return(DFX_K_SUCCESS);
2521 }
2522
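/*
 * Illustrative caller, not part of the driver: a port control command that
 * produces a value in the Host Data register is issued with a non-NULL
 * host_data pointer and checked for the timeout condition.
 * EXAMPLE_PORT_COMMAND is a placeholder, not a real PI_PCTRL_M_* constant.
 */
#if 0
	PI_UINT32 host_data;

	if (dfx_hw_port_ctrl_req(bp, EXAMPLE_PORT_COMMAND, 0, 0,
				 &host_data) != DFX_K_SUCCESS)
		printk("%s: port control command timed out\n", bp->dev->name);
	else
		printk("%s: host data = 0x%08X\n", bp->dev->name, host_data);
#endif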
2523 \f
2524 /*
2525 * =====================
2526 * = dfx_hw_adap_reset =
2527 * =====================
2528 *
2529 * Overview:
2530 * Resets adapter
2531 *
2532 * Returns:
2533 * None
2534 *
2535 * Arguments:
2536 * bp - pointer to board information
2537 * type - type of reset to perform
2538 *
2539 * Functional Description:
2540 * Issue soft reset to adapter by writing to PDQ Port Reset
2541 * register. Use incoming reset type to tell adapter what
2542 * kind of reset operation to perform.
2543 *
2544 * Return Codes:
2545 * None
2546 *
2547 * Assumptions:
2548 * This routine merely issues a soft reset to the adapter.
2549 * It is expected that after this routine returns, the caller
2550 * will appropriately poll the Port Status register for the
2551 * adapter to enter the proper state.
2552 *
2553 * Side Effects:
2554 * Internal adapter registers are cleared.
2555 */
2556
2557 static void dfx_hw_adap_reset(
2558 DFX_board_t *bp,
2559 PI_UINT32 type
2560 )
2561
2562 {
2563 /* Set Reset type and assert reset */
2564
2565 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */
2566 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2567
2568 	/* Wait for at least 1 microsecond according to the spec. We wait 20 just to be safe */
2569
2570 udelay(20);
2571
2572 /* Deassert reset */
2573
2574 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2575 }
2576
2577 \f
2578 /*
2579 * ========================
2580 * = dfx_hw_adap_state_rd =
2581 * ========================
2582 *
2583 * Overview:
2584 * Returns current adapter state
2585 *
2586 * Returns:
2587 * Adapter state per PDQ Port Specification
2588 *
2589 * Arguments:
2590 * bp - pointer to board information
2591 *
2592 * Functional Description:
2593 * Reads PDQ Port Status register and returns adapter state.
2594 *
2595 * Return Codes:
2596 * None
2597 *
2598 * Assumptions:
2599 * None
2600 *
2601 * Side Effects:
2602 * None
2603 */
2604
2605 static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2606 {
2607 PI_UINT32 port_status; /* Port Status register value */
2608
2609 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2610 return((port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE);
2611 }
2612
2613 \f
2614 /*
2615 * =====================
2616 * = dfx_hw_dma_uninit =
2617 * =====================
2618 *
2619 * Overview:
2620 * Brings adapter to DMA_UNAVAILABLE state
2621 *
2622 * Returns:
2623 * Condition code
2624 *
2625 * Arguments:
2626 * bp - pointer to board information
2627 * type - type of reset to perform
2628 *
2629 * Functional Description:
2630 * Bring adapter to DMA_UNAVAILABLE state by performing the following:
2631 * 1. Set reset type bit in Port Data A Register then reset adapter.
2632 * 2. Check that adapter is in DMA_UNAVAILABLE state.
2633 *
2634 * Return Codes:
2635 * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state
2636 * DFX_K_HW_TIMEOUT - adapter did not reset properly
2637 *
2638 * Assumptions:
2639 * None
2640 *
2641 * Side Effects:
2642 * Internal adapter registers are cleared.
2643 */
2644
2645 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2646 {
2647 int timeout_cnt; /* used in for loops */
2648
2649 /* Set reset type bit and reset adapter */
2650
2651 dfx_hw_adap_reset(bp, type);
2652
2653 /* Now wait for adapter to enter DMA_UNAVAILABLE state */
2654
2655 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2656 {
2657 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2658 break;
2659 udelay(100); /* wait for 100 microseconds */
2660 }
2661 if (timeout_cnt == 0)
2662 return(DFX_K_HW_TIMEOUT);
2663 return(DFX_K_SUCCESS);
2664 }
2665 \f
2666 /*
2667 * Align an sk_buff's data area to an n-byte boundary, where n is a power of 2
2668 *
2669 */
2670
2671 static void my_skb_align(struct sk_buff *skb, int n)
2672 {
2673 unsigned long x = (unsigned long)skb->data;
2674 unsigned long v;
2675
2676 v = ALIGN(x, n); /* Where we want to be */
2677
2678 skb_reserve(skb, v - x);
2679 }
2680
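/*
 * Worked example, illustration only: with skb->data at, say, 0x1234 and
 * n == 128, ALIGN() rounds up to 0x1280, so skb_reserve() skips
 * 0x4c (76) bytes.  This relies on the buffer having been allocated with
 * enough headroom to absorb up to n - 1 reserved bytes.
 */
#if 0
	unsigned long x = 0x1234;		/* example data address             */
	unsigned long v = ALIGN(x, 128);	/* rounds up to 0x1280              */
	unsigned long skipped = v - x;		/* 0x4c == 76 bytes for skb_reserve */
#endif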
2681 \f
2682 /*
2683 * ================
2684 * = dfx_rcv_init =
2685 * ================
2686 *
2687 * Overview:
2688 * Produces buffers to adapter LLC Host receive descriptor block
2689 *
2690 * Returns:
2691 * 0 on success; -ENOMEM if receive buffer allocation fails
2692 *
2693 * Arguments:
2694 * bp - pointer to board information
2695 * get_buffers - non-zero if buffers to be allocated
2696 *
2697 * Functional Description:
2698 * This routine can be called during dfx_adap_init() or during an adapter
2699 * reset. It initializes the descriptor block and produces all allocated
2700 * LLC Host queue receive buffers.
2701 *
2702 * Return Codes:
2703 * Return 0 on success or -ENOMEM if buffer allocation failed (when using
2704 * dynamic buffer allocation). If the buffer allocation failed, the
2705 * already allocated buffers will not be released and the caller should do
2706 * this.
2707 *
2708 * Assumptions:
2709 * The PDQ has been reset and the adapter and driver maintained Type 2
2710 * register indices are cleared.
2711 *
2712 * Side Effects:
2713 * Receive buffers are posted to the adapter LLC queue and the adapter
2714 * is notified.
2715 */
2716
2717 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2718 {
2719 int i, j; /* used in for loop */
2720
2721 /*
2722 * Since each receive buffer is a single fragment of same length, initialize
2723 * first longword in each receive descriptor for entire LLC Host descriptor
2724 * block. Also initialize second longword in each receive descriptor with
2725 * physical address of receive buffer. We'll always allocate receive
2726 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2727 * block and produce new receive buffers by simply updating the receive
2728 * producer index.
2729 *
2730 * Assumptions:
2731 * To support all shipping versions of PDQ, the receive buffer size
2732 * must be mod 128 in length and the physical address must be 128 byte
2733 * aligned. In other words, bits 0-6 of the length and address must
2734 * be zero for the following descriptor field entries to be correct on
2735 * all PDQ-based boards. We guaranteed both requirements during
2736 * driver initialization when we allocated memory for the receive buffers.
2737 */
2738
2739 if (get_buffers) {
2740 #ifdef DYNAMIC_BUFFERS
2741 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2742 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2743 {
2744 struct sk_buff *newskb = __dev_alloc_skb(NEW_SKB_SIZE, GFP_NOIO);
2745 if (!newskb)
2746 return -ENOMEM;
2747 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2748 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2749 /*
2750 * align to 128 bytes for compatibility with
2751 * the old EISA boards.
2752 */
2753
2754 my_skb_align(newskb, 128);
2755 bp->descr_block_virt->rcv_data[i + j].long_1 =
2756 (u32)pci_map_single(bp->pci_dev, newskb->data,
2757 NEW_SKB_SIZE,
2758 PCI_DMA_FROMDEVICE);
2759 /*
2760 * p_rcv_buff_va is only used inside the
2761 * kernel so we put the skb pointer here.
2762 */
2763 bp->p_rcv_buff_va[i+j] = (char *) newskb;
2764 }
2765 #else
2766 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
2767 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2768 {
2769 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2770 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2771 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2772 bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2773 }
2774 #endif
2775 }
2776
2777 /* Update receive producer and Type 2 register */
2778
2779 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
2780 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
2781 return 0;
2782 }
2783
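/*
 * Worked example, illustration only, for the statically allocated branch
 * above: if rcv_bufs_to_post were 32 and the ring has
 * PI_RCV_DATA_K_NUM_ENTRIES (256) descriptor slots, then buffer i backs
 * entries i, i + 32, i + 64, ..., i + 224.  Every slot therefore already
 * points at a valid buffer, so recycling a buffer only requires bumping
 * the receive producer index as dfx_rcv_queue_process() does.
 */
#if 0
	int entry;

	/* slots that share buffer 0 when rcv_bufs_to_post == 32: */
	for (entry = 0; entry < PI_RCV_DATA_K_NUM_ENTRIES; entry += 32)
		;	/* descriptor[entry].long_1 == rcv_block_phys + 0 */
#endif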
2784 \f
2785 /*
2786 * =========================
2787 * = dfx_rcv_queue_process =
2788 * =========================
2789 *
2790 * Overview:
2791 * Process received LLC frames.
2792 *
2793 * Returns:
2794 * None
2795 *
2796 * Arguments:
2797 * bp - pointer to board information
2798 *
2799 * Functional Description:
2800 * Received LLC frames are processed until there are no more consumed frames.
2801 * Once all frames are processed, the receive buffers are returned to the
2802 * adapter. Note that this algorithm fixes the length of time that can be spent
2803 * in this routine, because there is a fixed number of receive buffers to
2804 * process and buffers are not produced until this routine exits and returns
2805 * to the ISR.
2806 *
2807 * Return Codes:
2808 * None
2809 *
2810 * Assumptions:
2811 * None
2812 *
2813 * Side Effects:
2814 * None
2815 */
2816
2817 static void dfx_rcv_queue_process(
2818 DFX_board_t *bp
2819 )
2820
2821 {
2822 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
2823 char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
2824 u32 descr, pkt_len; /* FMC descriptor field and packet length */
2825 struct sk_buff *skb; /* pointer to a sk_buff to hold incoming packet data */
2826
2827 /* Service all consumed LLC receive frames */
2828
2829 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
2830 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
2831 {
2832 /* Process any errors */
2833
2834 int entry;
2835
2836 entry = bp->rcv_xmt_reg.index.rcv_comp;
2837 #ifdef DYNAMIC_BUFFERS
2838 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
2839 #else
2840 p_buff = (char *) bp->p_rcv_buff_va[entry];
2841 #endif
2842 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
2843
2844 if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
2845 {
2846 if (descr & PI_FMC_DESCR_M_RCC_CRC)
2847 bp->rcv_crc_errors++;
2848 else
2849 bp->rcv_frame_status_errors++;
2850 }
2851 else
2852 {
2853 int rx_in_place = 0;
2854
2855 /* The frame was received without errors - verify packet length */
2856
2857 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
2858 pkt_len -= 4; /* subtract 4 byte CRC */
2859 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
2860 bp->rcv_length_errors++;
2861 else{
2862 #ifdef DYNAMIC_BUFFERS
2863 if (pkt_len > SKBUFF_RX_COPYBREAK) {
2864 struct sk_buff *newskb;
2865
2866 newskb = dev_alloc_skb(NEW_SKB_SIZE);
2867 if (newskb){
2868 rx_in_place = 1;
2869
2870 my_skb_align(newskb, 128);
2871 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
2872 pci_unmap_single(bp->pci_dev,
2873 bp->descr_block_virt->rcv_data[entry].long_1,
2874 NEW_SKB_SIZE,
2875 PCI_DMA_FROMDEVICE);
2876 skb_reserve(skb, RCV_BUFF_K_PADDING);
2877 bp->p_rcv_buff_va[entry] = (char *)newskb;
2878 bp->descr_block_virt->rcv_data[entry].long_1 =
2879 (u32)pci_map_single(bp->pci_dev,
2880 newskb->data,
2881 NEW_SKB_SIZE,
2882 PCI_DMA_FROMDEVICE);
2883 } else
2884 skb = NULL;
2885 } else
2886 #endif
2887 skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */
2888 if (skb == NULL)
2889 {
2890 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
2891 bp->rcv_discards++;
2892 break;
2893 }
2894 else {
2895 #ifndef DYNAMIC_BUFFERS
2896 if (! rx_in_place)
2897 #endif
2898 {
2899 /* Receive buffer allocated, pass receive packet up */
2900
2901 memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3);
2902 }
2903
2904 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
2905 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
2906 skb->dev = bp->dev; /* pass up device pointer */
2907
2908 skb->protocol = fddi_type_trans(skb, bp->dev);
2909 bp->rcv_total_bytes += skb->len;
2910 netif_rx(skb);
2911
2912 /* Update the rcv counters */
2913 bp->dev->last_rx = jiffies;
2914 bp->rcv_total_frames++;
2915 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
2916 bp->rcv_multicast_frames++;
2917 }
2918 }
2919 }
2920
2921 /*
2922 * Advance the producer (for recycling) and advance the completion
2923 * (for servicing received frames). Note that it is okay to
2924 * advance the producer without checking that it passes the
2925 * completion index because they are both advanced at the same
2926 * rate.
2927 */
2928
2929 bp->rcv_xmt_reg.index.rcv_prod += 1;
2930 bp->rcv_xmt_reg.index.rcv_comp += 1;
2931 }
2932 }
2933
2934 \f
2935 /*
2936 * =====================
2937 * = dfx_xmt_queue_pkt =
2938 * =====================
2939 *
2940 * Overview:
2941 * Queues packets for transmission
2942 *
2943 * Returns:
2944 * Condition code
2945 *
2946 * Arguments:
2947 * skb - pointer to sk_buff to queue for transmission
2948 * dev - pointer to device information
2949 *
2950 * Functional Description:
2951 * Here we assume that an incoming skb transmit request
2952 * is contained in a single physically contiguous buffer
2953 * in which the virtual address of the start of packet
2954 * (skb->data) can be converted to a physical address
2955 * by using pci_map_single().
2956 *
2957 * Since the adapter architecture requires a three byte
2958 * packet request header to prepend the start of packet,
2959 * we'll write the three byte field immediately prior to
2960 * the FC byte. This assumption is valid because we've
2961 * ensured that dev->hard_header_len includes three pad
2962 * bytes. By posting a single fragment to the adapter,
2963 * we'll reduce the number of descriptor fetches and
2964 * bus traffic needed to send the request.
2965 *
2966 * Also, we can't free the skb until after it's been DMA'd
2967 * out by the adapter, so we'll queue it in the driver and
2968 * return it in dfx_xmt_done.
2969 *
2970 * Return Codes:
2971 * 0 - driver queued packet, link is unavailable, or skbuff was bad
2972 * 1 - caller should requeue the sk_buff for later transmission
2973 *
2974 * Assumptions:
2975 * First and foremost, we assume the incoming skb pointer
2976 * is NOT NULL and is pointing to a valid sk_buff structure.
2977 *
2978 * The outgoing packet is complete, starting with the
2979 * frame control byte including the last byte of data,
2980 * but NOT including the 4 byte CRC. We'll let the
2981 * adapter hardware generate and append the CRC.
2982 *
2983 * The entire packet is stored in one physically
2984 * contiguous buffer which is not cached and whose
2985 * 32-bit physical address can be determined.
2986 *
2987 * It's vital that this routine is NOT reentered for the
2988 * same board and that the OS is not in another section of
2989 * code (eg. dfx_int_common) for the same board on a
2990 * different thread.
2991 *
2992 * Side Effects:
2993 * None
2994 */
2995
2996 static int dfx_xmt_queue_pkt(
2997 struct sk_buff *skb,
2998 struct net_device *dev
2999 )
3000
3001 {
3002 DFX_board_t *bp = dev->priv;
3003 u8 prod; /* local transmit producer index */
3004 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
3005 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3006 unsigned long flags;
3007
3008 netif_stop_queue(dev);
3009
3010 /*
3011 * Verify that incoming transmit request is OK
3012 *
3013 * Note: The packet size check is consistent with other
3014 * Linux device drivers, although the correct packet
3015 * size should be verified before calling the
3016 * transmit routine.
3017 */
3018
3019 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3020 {
3021 printk("%s: Invalid packet length - %u bytes\n",
3022 dev->name, skb->len);
3023 bp->xmt_length_errors++; /* bump error counter */
3024 netif_wake_queue(dev);
3025 dev_kfree_skb(skb);
3026 return(0); /* return "success" */
3027 }
3028 /*
3029 * See if adapter link is available, if not, free buffer
3030 *
3031 * Note: If the link isn't available, free buffer and return 0
3032 * rather than tell the upper layer to requeue the packet.
3033 * The methodology here is that by the time the link
3034 * becomes available, the packet to be sent will be
3035 * fairly stale. By simply dropping the packet, the
3036 * higher layer protocols will eventually time out
3037 * waiting for response packets that they will never receive.
3038 */
3039
3040 if (bp->link_available == PI_K_FALSE)
3041 {
3042 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */
3043 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */
3044 else
3045 {
3046 bp->xmt_discards++; /* bump error counter */
3047 dev_kfree_skb(skb); /* free sk_buff now */
3048 netif_wake_queue(dev);
3049 return(0); /* return "success" */
3050 }
3051 }
3052
3053 spin_lock_irqsave(&bp->lock, flags);
3054
3055 /* Get the current producer and the next free xmt data descriptor */
3056
3057 prod = bp->rcv_xmt_reg.index.xmt_prod;
3058 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3059
3060 /*
3061 * Get pointer to auxiliary queue entry to contain information
3062 * for this packet.
3063 *
3064 * Note: The current xmt producer index will become the
3065 * current xmt completion index when we complete this
3066 * packet later on. So, we'll get the pointer to the
3067 * next auxiliary queue entry now before we bump the
3068 * producer index.
3069 */
3070
3071 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
3072
3073 /* Write the three PRH bytes immediately before the FC byte */
3074
3075 skb_push(skb,3);
3076 skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
3077 skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
3078 skb->data[2] = DFX_PRH2_BYTE; /* specification */
3079
3080 /*
3081 * Write the descriptor with buffer info and bump producer
3082 *
3083 * Note: Since we need to start DMA from the packet request
3084 * header, we'll add 3 bytes to the DMA buffer length,
3085 * and we'll determine the physical address of the
3086 * buffer from the PRH, not skb->data.
3087 *
3088 * Assumptions:
3089 * 1. Packet starts with the frame control (FC) byte
3090 * at skb->data.
3091 * 2. The 4-byte CRC is not appended to the buffer or
3092 * included in the length.
3093 * 3. Packet length (skb->len) is from FC to end of
3094 * data, inclusive.
3095 * 4. The packet length does not exceed the maximum
3096 * FDDI LLC frame length of 4491 bytes.
3097 * 5. The entire packet is contained in a physically
3098 * contiguous, non-cached, locked memory space
3099 * comprised of a single buffer pointed to by
3100 * skb->data.
3101 * 6. The physical address of the start of packet
3102 * can be determined from the virtual address
3103 * by using pci_map_single() and is only 32-bits
3104 * wide.
3105 */
3106
3107 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3108 p_xmt_descr->long_1 = (u32)pci_map_single(bp->pci_dev, skb->data,
3109 skb->len, PCI_DMA_TODEVICE);
3110
3111 /*
3112 * Verify that descriptor is actually available
3113 *
3114 * Note: If descriptor isn't available, return 1 which tells
3115 * the upper layer to requeue the packet for later
3116 * transmission.
3117 *
3118 * We must never let the producer catch up with the completion
3119 * index; producer == completion is reserved to mean the queue is empty.
3120 */
3121
3122 if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3123 {
3124 skb_pull(skb,3);
3125 spin_unlock_irqrestore(&bp->lock, flags);
3126 return(1); /* requeue packet for later */
3127 }
3128
3129 /*
3130 * Save info for this packet for xmt done indication routine
3131 *
3132 * Normally, we'd save the producer index in the p_xmt_drv_descr
3133 * structure so that we'd have it handy when we complete this
3134 * packet later (in dfx_xmt_done). However, since the current
3135 * transmit architecture guarantees a single fragment for the
3136 * entire packet, we can simply bump the completion index by
3137 * one (1) for each completed packet.
3138 *
3139 * Note: If this assumption changes and we're presented with
3140 * an inconsistent number of transmit fragments for packet
3141 * data, we'll need to modify this code to save the current
3142 * transmit producer index.
3143 */
3144
3145 p_xmt_drv_descr->p_skb = skb;
3146
3147 /* Update Type 2 register */
3148
3149 bp->rcv_xmt_reg.index.xmt_prod = prod;
3150 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3151 spin_unlock_irqrestore(&bp->lock, flags);
3152 netif_wake_queue(dev);
3153 return(0); /* packet queued to adapter */
3154 }
3155
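/*
 * Illustrative frame layout, not part of the driver: after the
 * skb_push(skb, 3) above, the single DMA fragment handed to the adapter
 * is laid out as
 *
 *	skb->data:  PRH0 PRH1 PRH2 | FC | LLC data ...
 *
 * and skb->len (used for both the descriptor segment length and the
 * pci_map_single() length) already includes the three PRH bytes but not
 * the CRC, which the adapter hardware appends per the assumptions above.
 */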
3156 \f
3157 /*
3158 * ================
3159 * = dfx_xmt_done =
3160 * ================
3161 *
3162 * Overview:
3163 * Processes all frames that have been transmitted.
3164 *
3165 * Returns:
3166 * Count of transmit frames completed and freed
3167 *
3168 * Arguments:
3169 * bp - pointer to board information
3170 *
3171 * Functional Description:
3172 * For all consumed transmit descriptors that have not
3173 * yet been completed, we'll free the skb we were holding
3174 * onto using dev_kfree_skb and bump the appropriate
3175 * counters.
3176 *
3177 * Return Codes:
3178 * Non-negative count of freed transmit frames
3179 *
3180 * Assumptions:
3181 * The Type 2 register is not updated in this routine. It is
3182 * assumed that it will be updated in the ISR when dfx_xmt_done
3183 * returns.
3184 *
3185 * Side Effects:
3186 * None
3187 */
3188
3189 static int dfx_xmt_done(DFX_board_t *bp)
3190 {
3191 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3192 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3193 u8 comp; /* local transmit completion index */
3194 int freed = 0; /* buffers freed */
3195
3196 /* Service all consumed transmit frames */
3197
3198 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3199 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3200 {
3201 /* Get pointer to the transmit driver descriptor block information */
3202
3203 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3204
3205 /* Increment transmit counters */
3206
3207 bp->xmt_total_frames++;
3208 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3209
3210 /* Return skb to operating system */
3211 comp = bp->rcv_xmt_reg.index.xmt_comp;
3212 pci_unmap_single(bp->pci_dev,
3213 bp->descr_block_virt->xmt_data[comp].long_1,
3214 p_xmt_drv_descr->p_skb->len,
3215 PCI_DMA_TODEVICE);
3216 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3217
3218 /*
3219 * Move to start of next packet by updating completion index
3220 *
3221 * Here we assume that a transmit packet request is always
3222 * serviced by posting one fragment. We can therefore
3223 * simplify the completion code by incrementing the
3224 * completion index by one. This code will need to be
3225 * modified if this assumption changes. See comments
3226 * in dfx_xmt_queue_pkt for more details.
3227 */
3228
3229 bp->rcv_xmt_reg.index.xmt_comp += 1;
3230 freed++;
3231 }
3232 return freed;
3233 }
3234
3235 \f
3236 /*
3237 * =================
3238 * = dfx_rcv_flush =
3239 * =================
3240 *
3241 * Overview:
3242 * Remove all skb's in the receive ring.
3243 *
3244 * Returns:
3245 * None
3246 *
3247 * Arguments:
3248 * bp - pointer to board information
3249 *
3250 * Functional Description:
3251 * Frees all the dynamically allocated skb's that are
3252 * currently attached to the device receive ring. This
3253 * function is typically only used when the device is
3254 * initialized or reinitialized.
3255 *
3256 * Return Codes:
3257 * None
3258 *
3259 * Side Effects:
3260 * None
3261 */
3262 #ifdef DYNAMIC_BUFFERS
3263 static void dfx_rcv_flush( DFX_board_t *bp )
3264 {
3265 int i, j;
3266
3267 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3268 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3269 {
3270 struct sk_buff *skb;
3271 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3272 if (skb)
3273 dev_kfree_skb(skb);
3274 bp->p_rcv_buff_va[i+j] = NULL;
3275 }
3276
3277 }
3278 #else
3279 static inline void dfx_rcv_flush( DFX_board_t *bp )
3280 {
3281 }
3282 #endif /* DYNAMIC_BUFFERS */
3283
3284 /*
3285 * =================
3286 * = dfx_xmt_flush =
3287 * =================
3288 *
3289 * Overview:
3290 * Processes all frames whether they've been transmitted
3291 * or not.
3292 *
3293 * Returns:
3294 * None
3295 *
3296 * Arguments:
3297 * bp - pointer to board information
3298 *
3299 * Functional Description:
3300 * For all produced transmit descriptors that have not
3301 * yet been completed, we'll free the skb we were holding
3302 * onto using dev_kfree_skb and bump the appropriate
3303 * counters. Of course, it's possible that some of
3304 * these transmit requests actually did go out, but we
3305 * won't make that distinction here. Finally, we'll
3306 * update the consumer index to match the producer.
3307 *
3308 * Return Codes:
3309 * None
3310 *
3311 * Assumptions:
3312 * This routine does NOT update the Type 2 register. It
3313 * is assumed that this routine is being called during a
3314 * transmit flush interrupt, or a shutdown or close routine.
3315 *
3316 * Side Effects:
3317 * None
3318 */
3319
3320 static void dfx_xmt_flush( DFX_board_t *bp )
3321 {
3322 u32 prod_cons; /* rcv/xmt consumer block longword */
3323 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3324 u8 comp; /* local transmit completion index */
3325
3326 /* Flush all outstanding transmit frames */
3327
3328 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3329 {
3330 /* Get pointer to the transmit driver descriptor block information */
3331
3332 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3333
3334 /* Return skb to operating system */
3335 comp = bp->rcv_xmt_reg.index.xmt_comp;
3336 pci_unmap_single(bp->pci_dev,
3337 bp->descr_block_virt->xmt_data[comp].long_1,
3338 p_xmt_drv_descr->p_skb->len,
3339 PCI_DMA_TODEVICE);
3340 dev_kfree_skb(p_xmt_drv_descr->p_skb);
3341
3342 /* Increment transmit error counter */
3343
3344 bp->xmt_discards++;
3345
3346 /*
3347 * Move to start of next packet by updating completion index
3348 *
3349 * Here we assume that a transmit packet request is always
3350 * serviced by posting one fragment. We can therefore
3351 * simplify the completion code by incrementing the
3352 * completion index by one. This code will need to be
3353 * modified if this assumption changes. See comments
3354 * in dfx_xmt_queue_pkt for more details.
3355 */
3356
3357 bp->rcv_xmt_reg.index.xmt_comp += 1;
3358 }
3359
3360 /* Update the transmit consumer index in the consumer block */
3361
3362 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3363 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3364 bp->cons_block_virt->xmt_rcv_data = prod_cons;
3365 }
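
/*
 * The consumer-block update at the end of dfx_xmt_flush() is a
 * read-modify-write of one packed longword: clear the transmit index
 * field with its mask, then OR in the new value shifted to the field's
 * bit position.  A minimal sketch of the same idiom; SKETCH_V_FIELD and
 * SKETCH_M_FIELD are made-up stand-ins for the PI_CONS_V_XMT_INDEX and
 * PI_CONS_M_XMT_INDEX definitions, not their real values.
 */
#if 0
#define SKETCH_V_FIELD	8				/* hypothetical bit offset of the field */
#define SKETCH_M_FIELD	(0xffUL << SKETCH_V_FIELD)	/* hypothetical field mask */

static unsigned long sketch_set_field(unsigned long word, unsigned long value)
{
	word &= ~SKETCH_M_FIELD;		/* clear the old field contents */
	word |= value << SKETCH_V_FIELD;	/* insert the new value in place */
	return word;
}
#endif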
3366
3367 static void __devexit dfx_remove_one_pci_or_eisa(struct pci_dev *pdev, struct net_device *dev)
3368 {
3369 DFX_board_t *bp = dev->priv;
3370 int alloc_size; /* total buffer size used */
3371
3372 unregister_netdev(dev);
3373         release_region(dev->base_addr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN);
3374
3375 alloc_size = sizeof(PI_DESCR_BLOCK) +
3376 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3377 #ifndef DYNAMIC_BUFFERS
3378 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3379 #endif
3380 sizeof(PI_CONSUMER_BLOCK) +
3381 (PI_ALIGN_K_DESC_BLK - 1);
3382 if (bp->kmalloced)
3383 pci_free_consistent(pdev, alloc_size, bp->kmalloced,
3384 bp->kmalloced_dma);
3385 free_netdev(dev);
3386 }
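
/*
 * The teardown above must pass pci_free_consistent() the same size,
 * virtual address and DMA handle that pci_alloc_consistent() produced
 * at probe time, which is why alloc_size is recomputed here from the
 * same terms the probe path used.  A minimal sketch of that pairing;
 * the sketch_* names and the 4096-byte size are hypothetical and error
 * handling is reduced to a NULL check.
 */
#if 0
static void *sketch_vaddr;			/* CPU address of the shared block */
static dma_addr_t sketch_dma;			/* bus address of the shared block */
static const size_t sketch_size = 4096;		/* hypothetical shared-memory size */

static int sketch_probe_alloc(struct pci_dev *pdev)
{
	sketch_vaddr = pci_alloc_consistent(pdev, sketch_size, &sketch_dma);
	return sketch_vaddr ? 0 : -ENOMEM;
}

static void sketch_remove_free(struct pci_dev *pdev)
{
	if (sketch_vaddr)
		pci_free_consistent(pdev, sketch_size, sketch_vaddr, sketch_dma);
}
#endif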
3387
3388 static void __devexit dfx_remove_one (struct pci_dev *pdev)
3389 {
3390 struct net_device *dev = pci_get_drvdata(pdev);
3391
3392 dfx_remove_one_pci_or_eisa(pdev, dev);
3393 pci_set_drvdata(pdev, NULL);
3394 }
3395
3396 static struct pci_device_id dfx_pci_tbl[] = {
3397 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI, PCI_ANY_ID, PCI_ANY_ID, },
3398 { 0, }
3399 };
3400 MODULE_DEVICE_TABLE(pci, dfx_pci_tbl);
3401
3402 static struct pci_driver dfx_driver = {
3403 .name = "defxx",
3404 .probe = dfx_init_one,
3405 .remove = __devexit_p(dfx_remove_one),
3406 .id_table = dfx_pci_tbl,
3407 };
3408
3409 static int dfx_have_pci;
3410 static int dfx_have_eisa;
3411
3412
3413 static void __exit dfx_eisa_cleanup(void)
3414 {
3415 struct net_device *dev = root_dfx_eisa_dev;
3416
3417 while (dev)
3418 {
3419 struct net_device *tmp;
3420 DFX_board_t *bp;
3421
3422 bp = (DFX_board_t*)dev->priv;
3423 tmp = bp->next;
3424 dfx_remove_one_pci_or_eisa(NULL, dev);
3425 dev = tmp;
3426 }
3427 }
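
/*
 * dfx_eisa_cleanup() walks a singly linked list whose link field lives
 * inside memory that dfx_remove_one_pci_or_eisa() frees, so each node's
 * successor has to be captured before the node itself is released.  A
 * generic sketch of that save-next-then-free traversal; struct
 * sketch_node and sketch_destroy_list() are hypothetical.
 */
#if 0
struct sketch_node {
	struct sketch_node *next;
};

static void sketch_destroy_list(struct sketch_node *head,
				void (*destroy)(struct sketch_node *node))
{
	while (head) {
		struct sketch_node *next = head->next;	/* save the link before freeing */

		destroy(head);				/* may free the node and its link field */
		head = next;
	}
}
#endif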
3428
3429 static int __init dfx_init(void)
3430 {
3431 int rc_pci, rc_eisa;
3432
3433 rc_pci = pci_module_init(&dfx_driver);
3434 if (rc_pci >= 0) dfx_have_pci = 1;
3435
3436 rc_eisa = dfx_eisa_init();
3437 if (rc_eisa >= 0) dfx_have_eisa = 1;
3438
3439 return ((rc_eisa < 0) ? 0 : rc_eisa) + ((rc_pci < 0) ? 0 : rc_pci);
3440 }
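
/*
 * dfx_init() combines the two bus probes by clamping any negative
 * (error) result to zero before summing, so a failure on one bus does
 * not hide a success on the other.  A stand-alone sketch of that
 * combination rule; sketch_combine() and the example values are
 * illustrative only.
 */
#if 0
static int sketch_combine(int rc_a, int rc_b)
{
	return ((rc_a < 0) ? 0 : rc_a) + ((rc_b < 0) ? 0 : rc_b);
}

/* sketch_combine(-ENODEV, 2) == 2, sketch_combine(-ENODEV, -ENODEV) == 0 */
#endif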
3441
3442 static void __exit dfx_cleanup(void)
3443 {
3444 if (dfx_have_pci)
3445 pci_unregister_driver(&dfx_driver);
3446 if (dfx_have_eisa)
3447 dfx_eisa_cleanup();
3448
3449 }
3450
3451 module_init(dfx_init);
3452 module_exit(dfx_cleanup);
3453 MODULE_AUTHOR("Lawrence V. Stefani");
3454 MODULE_DESCRIPTION("DEC FDDIcontroller EISA/PCI (DEFEA/DEFPA) driver "
3455 DRV_VERSION " " DRV_RELDATE);
3456 MODULE_LICENSE("GPL");
3457
3458 \f
3459 /*
3460 * Local variables:
3461 * kernel-compile-command: "gcc -D__KERNEL__ -I/root/linux/include -Wall -Wstrict-prototypes -O2 -pipe -fomit-frame-pointer -fno-strength-reduce -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -c defxx.c"
3462 * End:
3463 */