1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * File Name:
4 * skfddi.c
5 *
6 * Copyright Information:
7 * Copyright SysKonnect 1998,1999.
8 *
9 * The information in this file is provided "AS IS" without warranty.
10 *
11 * Abstract:
12 * A Linux device driver supporting the SysKonnect FDDI PCI controller
13  *	family.
14 *
15 * Maintainers:
16 * CG Christoph Goos (cgoos@syskonnect.de)
17 *
18 * Contributors:
19 * DM David S. Miller
20 *
21  *	Address all questions to:
22 * linux@syskonnect.de
23 *
24 * The technical manual for the adapters is available from SysKonnect's
25 * web pages: www.syskonnect.com
26  *	Go to "Support" and search the Knowledge Base for "manual".
27 *
28 * Driver Architecture:
29 * The driver architecture is based on the DEC FDDI driver by
30 * Lawrence V. Stefani and several ethernet drivers.
31 * I also used an existing Windows NT miniport driver.
32 * All hardware dependent functions are handled by the SysKonnect
33 * Hardware Module.
34  *	The only header files that are directly related to this source
35  *	file (skfddi.c) are h/types.h, h/osdef1st.h and h/targetos.h.
36  *	The others belong to the SysKonnect FDDI Hardware Module and
37  *	should not be changed.
38 *
39 * Modification History:
40 * Date Name Description
41 * 02-Mar-98 CG Created.
42 *
43 * 10-Mar-99 CG Support for 2.2.x added.
44 * 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
45 * 26-Oct-99 CG Fixed compilation error on 2.2.13
46 * 12-Nov-99 CG Source code release
47 * 22-Nov-99 CG Included in kernel source.
48 * 07-May-00 DM 64 bit fixes, new dma interface
49 * 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
50 * Daniele Bellucci <bellucda@tiscali.it>
51 * 03-Dec-03 SH Convert to PCI device model
52 *
53 * Compilation options (-Dxxx):
54 * DRIVERDEBUG print lots of messages to log file
55 * DUMPPACKETS print received/transmitted packets to logfile
56 *
57 * Tested cpu architectures:
58 * - i386
59 * - sparc64
60 */
61
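/*
 * Note: DRIVERDEBUG and DUMPPACKETS (see the compilation options above) are
 * plain preprocessor switches.  One possible way to enable them - shown only
 * as an illustration, not necessarily how the in-tree Makefile is arranged -
 * is to add the flags to the Kbuild makefile of this directory:
 *
 *	ccflags-y += -DDRIVERDEBUG -DDUMPPACKETS
 */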
62 /* Version information string - should be updated prior to */
63 /* each new release!!! */
64 #define VERSION "2.07"
65
66 static const char * const boot_msg =
67 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
68 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
69
70 /* Include files */
71
72 #include <linux/capability.h>
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/interrupt.h>
78 #include <linux/pci.h>
79 #include <linux/netdevice.h>
80 #include <linux/fddidevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/bitops.h>
83 #include <linux/gfp.h>
84
85 #include <asm/byteorder.h>
86 #include <asm/io.h>
87 #include <linux/uaccess.h>
88
89 #include "h/types.h"
90 #undef ADDR // undo Linux definition
91 #include "h/skfbi.h"
92 #include "h/fddi.h"
93 #include "h/smc.h"
94 #include "h/smtstate.h"
95
96
97 // Define module-wide (static) routines
98 static int skfp_driver_init(struct net_device *dev);
99 static int skfp_open(struct net_device *dev);
100 static int skfp_close(struct net_device *dev);
101 static irqreturn_t skfp_interrupt(int irq, void *dev_id);
102 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
103 static void skfp_ctl_set_multicast_list(struct net_device *dev);
104 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
105 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
106 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
107 static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
108 struct net_device *dev);
109 static void send_queued_packets(struct s_smc *smc);
110 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
111 static void ResetAdapter(struct s_smc *smc);
112
113
114 // Functions needed by the hardware module
115 void *mac_drv_get_space(struct s_smc *smc, u_int size);
116 void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
117 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
118 unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
119 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
120 int flag);
121 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
122 void llc_restart_tx(struct s_smc *smc);
123 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
124 int frag_count, int len);
125 void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
126 int frag_count);
127 void mac_drv_fill_rxd(struct s_smc *smc);
128 void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
129 int frag_count);
130 int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
131 int la_len);
132 void dump_data(unsigned char *Data, int length);
133
134 // External functions from the hardware module
135 extern u_int mac_drv_check_space(void);
136 extern int mac_drv_init(struct s_smc *smc);
137 extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
138 int len, int frame_status);
139 extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
140 int frame_len, int frame_status);
141 extern void fddi_isr(struct s_smc *smc);
142 extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
143 int len, int frame_status);
144 extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
145 extern void mac_drv_clear_rx_queue(struct s_smc *smc);
146 extern void enable_tx_irq(struct s_smc *smc, u_short queue);
147
148 static const struct pci_device_id skfddi_pci_tbl[] = {
149 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
150 { } /* Terminating entry */
151 };
152 MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
153 MODULE_LICENSE("GPL");
154 MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
155
156 // Define module-wide (static) variables
157
158 static int num_boards; /* total number of adapters configured */
159
160 static const struct net_device_ops skfp_netdev_ops = {
161 .ndo_open = skfp_open,
162 .ndo_stop = skfp_close,
163 .ndo_start_xmit = skfp_send_pkt,
164 .ndo_get_stats = skfp_ctl_get_stats,
165 .ndo_set_rx_mode = skfp_ctl_set_multicast_list,
166 .ndo_set_mac_address = skfp_ctl_set_mac_address,
167 .ndo_do_ioctl = skfp_ioctl,
168 };
169
170 /*
171 * =================
172 * = skfp_init_one =
173 * =================
174 *
175 * Overview:
176 * Probes for supported FDDI PCI controllers
177 *
178 * Returns:
179 * Condition code
180 *
181 * Arguments:
182 * pdev - pointer to PCI device information
183 *
184 * Functional Description:
185  *   This is now called by the PCI driver registration process
186 * for each board found.
187 *
188 * Return Codes:
189 * 0 - This device (fddi0, fddi1, etc) configured successfully
190 * -ENODEV - No devices present, or no SysKonnect FDDI PCI device
191 * present for this device name
192 *
193 *
194 * Side Effects:
195 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
196 * initialized and the board resources are read and stored in
197 * the device structure.
198 */
199 static int skfp_init_one(struct pci_dev *pdev,
200 const struct pci_device_id *ent)
201 {
202 struct net_device *dev;
203 struct s_smc *smc; /* board pointer */
204 void __iomem *mem;
205 int err;
206
207 pr_debug("entering skfp_init_one\n");
208
209 if (num_boards == 0)
210 printk("%s\n", boot_msg);
211
212 err = pci_enable_device(pdev);
213 if (err)
214 return err;
215
216 err = pci_request_regions(pdev, "skfddi");
217 if (err)
218 goto err_out1;
219
220 pci_set_master(pdev);
221
222 #ifdef MEM_MAPPED_IO
223 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
224 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
225 err = -EIO;
226 goto err_out2;
227 }
228
229 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
230 #else
231 	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
232 printk(KERN_ERR "skfp: region is not PIO resource\n");
233 err = -EIO;
234 goto err_out2;
235 }
236
237 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
238 #endif
239 if (!mem) {
240 printk(KERN_ERR "skfp: Unable to map register, "
241 "FDDI adapter will be disabled.\n");
242 err = -EIO;
243 goto err_out2;
244 }
245
246 dev = alloc_fddidev(sizeof(struct s_smc));
247 if (!dev) {
248 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
249 "FDDI adapter will be disabled.\n");
250 err = -ENOMEM;
251 goto err_out3;
252 }
253
254 dev->irq = pdev->irq;
255 dev->netdev_ops = &skfp_netdev_ops;
256
257 SET_NETDEV_DEV(dev, &pdev->dev);
258
259 /* Initialize board structure with bus-specific info */
260 smc = netdev_priv(dev);
261 smc->os.dev = dev;
262 smc->os.bus_type = SK_BUS_TYPE_PCI;
263 smc->os.pdev = *pdev;
264 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
265 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
266 smc->os.dev = dev;
267 smc->hw.slot = -1;
268 smc->hw.iop = mem;
269 smc->os.ResetRequested = FALSE;
270 skb_queue_head_init(&smc->os.SendSkbQueue);
271
272 dev->base_addr = (unsigned long)mem;
273
274 err = skfp_driver_init(dev);
275 if (err)
276 goto err_out4;
277
278 err = register_netdev(dev);
279 if (err)
280 goto err_out5;
281
282 ++num_boards;
283 pci_set_drvdata(pdev, dev);
284
285 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
286 (pdev->subsystem_device & 0xff00) == 0x5800)
287 printk("%s: SysKonnect FDDI PCI adapter"
288 " found (SK-%04X)\n", dev->name,
289 pdev->subsystem_device);
290 else
291 printk("%s: FDDI PCI adapter found\n", dev->name);
292
293 return 0;
294 err_out5:
295 if (smc->os.SharedMemAddr)
296 dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
297 smc->os.SharedMemAddr,
298 smc->os.SharedMemDMA);
299 dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
300 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
301 err_out4:
302 free_netdev(dev);
303 err_out3:
304 #ifdef MEM_MAPPED_IO
305 iounmap(mem);
306 #else
307 ioport_unmap(mem);
308 #endif
309 err_out2:
310 pci_release_regions(pdev);
311 err_out1:
312 pci_disable_device(pdev);
313 return err;
314 }
315
316 /*
317 * Called for each adapter board from pci_unregister_driver
318 */
319 static void skfp_remove_one(struct pci_dev *pdev)
320 {
321 struct net_device *p = pci_get_drvdata(pdev);
322 struct s_smc *lp = netdev_priv(p);
323
324 unregister_netdev(p);
325
326 if (lp->os.SharedMemAddr) {
327 dma_free_coherent(&pdev->dev,
328 lp->os.SharedMemSize,
329 lp->os.SharedMemAddr,
330 lp->os.SharedMemDMA);
331 lp->os.SharedMemAddr = NULL;
332 }
333 if (lp->os.LocalRxBuffer) {
334 dma_free_coherent(&pdev->dev,
335 MAX_FRAME_SIZE,
336 lp->os.LocalRxBuffer,
337 lp->os.LocalRxBufferDMA);
338 lp->os.LocalRxBuffer = NULL;
339 }
340 #ifdef MEM_MAPPED_IO
341 iounmap(lp->hw.iop);
342 #else
343 ioport_unmap(lp->hw.iop);
344 #endif
345 pci_release_regions(pdev);
346 free_netdev(p);
347
348 pci_disable_device(pdev);
349 }
350
351 /*
352 * ====================
353 * = skfp_driver_init =
354 * ====================
355 *
356 * Overview:
357 * Initializes remaining adapter board structure information
358 * and makes sure adapter is in a safe state prior to skfp_open().
359 *
360 * Returns:
361 * Condition code
362 *
363 * Arguments:
364 * dev - pointer to device information
365 *
366 * Functional Description:
367 * This function allocates additional resources such as the host memory
368 * blocks needed by the adapter.
369 * The adapter is also reset. The OS must call skfp_open() to open
370 * the adapter and bring it on-line.
371 *
372 * Return Codes:
373 * 0 - initialization succeeded
374  *   -EIO - initialization failed
375 */
376 static int skfp_driver_init(struct net_device *dev)
377 {
378 struct s_smc *smc = netdev_priv(dev);
379 skfddi_priv *bp = &smc->os;
380 int err = -EIO;
381
382 pr_debug("entering skfp_driver_init\n");
383
384 // set the io address in private structures
385 bp->base_addr = dev->base_addr;
386
387 // Get the interrupt level from the PCI Configuration Table
388 smc->hw.irq = dev->irq;
389
390 spin_lock_init(&bp->DriverLock);
391
392 // Allocate invalid frame
393 bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
394 &bp->LocalRxBufferDMA,
395 GFP_ATOMIC);
396 if (!bp->LocalRxBuffer) {
397 printk("could not allocate mem for ");
398 printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
399 goto fail;
400 }
401
402 // Determine the required size of the 'shared' memory area.
403 bp->SharedMemSize = mac_drv_check_space();
404 pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
405 if (bp->SharedMemSize > 0) {
406 bp->SharedMemSize += 16; // for descriptor alignment
407
408 bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
409 bp->SharedMemSize,
410 &bp->SharedMemDMA,
411 GFP_ATOMIC);
412 if (!bp->SharedMemAddr) {
413 printk("could not allocate mem for ");
414 printk("hardware module: %ld byte\n",
415 bp->SharedMemSize);
416 goto fail;
417 }
418
419 } else {
420 bp->SharedMemAddr = NULL;
421 }
422
423 bp->SharedMemHeap = 0;
424
425 card_stop(smc); // Reset adapter.
426
427 pr_debug("mac_drv_init()..\n");
428 if (mac_drv_init(smc) != 0) {
429 pr_debug("mac_drv_init() failed\n");
430 goto fail;
431 }
432 read_address(smc, NULL);
433 pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
434 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
435
436 smt_reset_defaults(smc, 0);
437
438 return 0;
439
440 fail:
441 if (bp->SharedMemAddr) {
442 dma_free_coherent(&bp->pdev.dev,
443 bp->SharedMemSize,
444 bp->SharedMemAddr,
445 bp->SharedMemDMA);
446 bp->SharedMemAddr = NULL;
447 }
448 if (bp->LocalRxBuffer) {
449 dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
450 bp->LocalRxBuffer, bp->LocalRxBufferDMA);
451 bp->LocalRxBuffer = NULL;
452 }
453 return err;
454 } // skfp_driver_init
455
456
457 /*
458 * =============
459 * = skfp_open =
460 * =============
461 *
462 * Overview:
463 * Opens the adapter
464 *
465 * Returns:
466 * Condition code
467 *
468 * Arguments:
469 * dev - pointer to device information
470 *
471 * Functional Description:
472 * This function brings the adapter to an operational state.
473 *
474 * Return Codes:
475 * 0 - Adapter was successfully opened
476 * -EAGAIN - Could not register IRQ
477 */
478 static int skfp_open(struct net_device *dev)
479 {
480 struct s_smc *smc = netdev_priv(dev);
481 int err;
482
483 pr_debug("entering skfp_open\n");
484 /* Register IRQ - support shared interrupts by passing device ptr */
485 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
486 dev->name, dev);
487 if (err)
488 return err;
489
490 /*
491 * Set current address to factory MAC address
492 *
493 * Note: We've already done this step in skfp_driver_init.
494 * However, it's possible that a user has set a node
495 * address override, then closed and reopened the
496 * adapter. Unless we reset the device address field
497 * now, we'll continue to use the existing modified
498 * address.
499 */
500 read_address(smc, NULL);
501 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
502
503 init_smt(smc, NULL);
504 smt_online(smc, 1);
505 STI_FBI();
506
507 /* Clear local multicast address tables */
508 mac_clear_multicast(smc);
509
510 /* Disable promiscuous filter settings */
511 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
512
513 netif_start_queue(dev);
514 return 0;
515 } // skfp_open
516
517
518 /*
519 * ==============
520 * = skfp_close =
521 * ==============
522 *
523 * Overview:
524 * Closes the device/module.
525 *
526 * Returns:
527 * Condition code
528 *
529 * Arguments:
530 * dev - pointer to device information
531 *
532 * Functional Description:
533 * This routine closes the adapter and brings it to a safe state.
534 * The interrupt service routine is deregistered with the OS.
535 * The adapter can be opened again with another call to skfp_open().
536 *
537 * Return Codes:
538 * Always return 0.
539 *
540 * Assumptions:
541 * No further requests for this adapter are made after this routine is
542 * called. skfp_open() can be called to reset and reinitialize the
543 * adapter.
544 */
545 static int skfp_close(struct net_device *dev)
546 {
547 struct s_smc *smc = netdev_priv(dev);
548 skfddi_priv *bp = &smc->os;
549
550 CLI_FBI();
551 smt_reset_defaults(smc, 1);
552 card_stop(smc);
553 mac_drv_clear_tx_queue(smc);
554 mac_drv_clear_rx_queue(smc);
555
556 netif_stop_queue(dev);
557 /* Deregister (free) IRQ */
558 free_irq(dev->irq, dev);
559
560 skb_queue_purge(&bp->SendSkbQueue);
561 bp->QueueSkb = MAX_TX_QUEUE_LEN;
562
563 return 0;
564 } // skfp_close
565
566
567 /*
568 * ==================
569 * = skfp_interrupt =
570 * ==================
571 *
572 * Overview:
573 * Interrupt processing routine
574 *
575 * Returns:
576 * None
577 *
578 * Arguments:
579 * irq - interrupt vector
580 * dev_id - pointer to device information
581 *
582 * Functional Description:
583 * This routine calls the interrupt processing routine for this adapter. It
584 * disables and reenables adapter interrupts, as appropriate. We can support
585 * shared interrupts since the incoming dev_id pointer provides our device
586 * structure context. All the real work is done in the hardware module.
587 *
588 * Return Codes:
589 * None
590 *
591 * Assumptions:
592 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
593 * on Intel-based systems) is done by the operating system outside this
594 * routine.
595 *
596 * System interrupts are enabled through this call.
597 *
598 * Side Effects:
599 * Interrupts are disabled, then reenabled at the adapter.
600 */
601
602 static irqreturn_t skfp_interrupt(int irq, void *dev_id)
603 {
604 struct net_device *dev = dev_id;
605 struct s_smc *smc; /* private board structure pointer */
606 skfddi_priv *bp;
607
608 smc = netdev_priv(dev);
609 bp = &smc->os;
610
611 // IRQs enabled or disabled ?
612 if (inpd(ADDR(B0_IMSK)) == 0) {
613 // IRQs are disabled: must be shared interrupt
614 return IRQ_NONE;
615 }
616 // Note: At this point, IRQs are enabled.
617 if ((inpd(ISR_A) & smc->hw.is_imask) == 0) { // IRQ?
618 // Adapter did not issue an IRQ: must be shared interrupt
619 return IRQ_NONE;
620 }
621 CLI_FBI(); // Disable IRQs from our adapter.
622 spin_lock(&bp->DriverLock);
623
624 // Call interrupt handler in hardware module (HWM).
625 fddi_isr(smc);
626
627 if (smc->os.ResetRequested) {
628 ResetAdapter(smc);
629 smc->os.ResetRequested = FALSE;
630 }
631 spin_unlock(&bp->DriverLock);
632 STI_FBI(); // Enable IRQs from our adapter.
633
634 return IRQ_HANDLED;
635 } // skfp_interrupt
636
637
638 /*
639 * ======================
640 * = skfp_ctl_get_stats =
641 * ======================
642 *
643 * Overview:
644 * Get statistics for FDDI adapter
645 *
646 * Returns:
647 * Pointer to FDDI statistics structure
648 *
649 * Arguments:
650 * dev - pointer to device information
651 *
652 * Functional Description:
653 * Gets current MIB objects from adapter, then
654 * returns FDDI statistics structure as defined
655 * in if_fddi.h.
656 *
657 * Note: Since the FDDI statistics structure is
658 * still new and the device structure doesn't
659 * have an FDDI-specific get statistics handler,
660 * we'll return the FDDI statistics structure as
661 * a pointer to an Ethernet statistics structure.
662 * That way, at least the first part of the statistics
663 * structure can be decoded properly.
664 * We'll have to pay attention to this routine as the
665 * device structure becomes more mature and LAN media
666 * independent.
667 *
668 */
669 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
670 {
671 struct s_smc *bp = netdev_priv(dev);
672
673 /* Fill the bp->stats structure with driver-maintained counters */
674
675 bp->os.MacStat.port_bs_flag[0] = 0x1234;
676 bp->os.MacStat.port_bs_flag[1] = 0x5678;
677 // goos: need to fill out fddi statistic
678 #if 0
679 /* Get FDDI SMT MIB objects */
680
681 /* Fill the bp->stats structure with the SMT MIB object values */
682
683 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
684 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
685 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
686 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
687 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
688 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
689 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
690 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
691 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
692 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
693 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
694 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
695 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
696 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
697 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
698 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
699 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
700 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
701 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
702 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
703 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
704 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
705 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
706 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
707 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
708 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
709 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
710 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
711 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
712 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
713 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
714 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
715 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
716 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
717 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
718 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
719 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
720 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
721 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
722 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
723 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
724 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
725 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
726 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
727 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
728 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
729 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
730 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
731 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
732 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
733 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
734 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
735 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
736 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
737 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
738 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
739 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
740 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
741 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
742 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
743 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
744 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
745 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
746 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
747 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
748 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
749 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
750 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
751 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
752 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
753 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
754 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
755 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
756 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
757 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
758 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
759 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
760 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
761 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
762 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
763 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
764 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
765 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
766 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
767 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
768 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
769 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
770 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
771 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
772 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
773 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
774 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
775
776
777 /* Fill the bp->stats structure with the FDDI counter values */
778
779 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
780 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
781 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
782 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
783 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
784 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
785 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
786 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
787 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
788 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
789 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
790
791 #endif
792 return (struct net_device_stats *)&bp->os.MacStat;
793 }				// skfp_ctl_get_stats
794
795
796 /*
797 * ==============================
798 * = skfp_ctl_set_multicast_list =
799 * ==============================
800 *
801 * Overview:
802 * Enable/Disable LLC frame promiscuous mode reception
803 * on the adapter and/or update multicast address table.
804 *
805 * Returns:
806 * None
807 *
808 * Arguments:
809 * dev - pointer to device information
810 *
811 * Functional Description:
812  *   This function acquires the driver lock and then simply calls
813  *   skfp_ctl_set_multicast_list_wo_lock.
814 * This routine follows a fairly simple algorithm for setting the
815 * adapter filters and CAM:
816 *
817 * if IFF_PROMISC flag is set
818 * enable promiscuous mode
819 * else
820 * disable promiscuous mode
821 * if number of multicast addresses <= max. multicast number
822 * add mc addresses to adapter table
823 * else
824 * enable promiscuous mode
825 * update adapter filters
826 *
827 * Assumptions:
828 * Multicast addresses are presented in canonical (LSB) format.
829 *
830 * Side Effects:
831 * On-board adapter filters are updated.
832 */
833 static void skfp_ctl_set_multicast_list(struct net_device *dev)
834 {
835 struct s_smc *smc = netdev_priv(dev);
836 skfddi_priv *bp = &smc->os;
837 unsigned long Flags;
838
839 spin_lock_irqsave(&bp->DriverLock, Flags);
840 skfp_ctl_set_multicast_list_wo_lock(dev);
841 spin_unlock_irqrestore(&bp->DriverLock, Flags);
842 } // skfp_ctl_set_multicast_list
843
844
845
846 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
847 {
848 struct s_smc *smc = netdev_priv(dev);
849 struct netdev_hw_addr *ha;
850
851 /* Enable promiscuous mode, if necessary */
852 if (dev->flags & IFF_PROMISC) {
853 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
854 pr_debug("PROMISCUOUS MODE ENABLED\n");
855 }
856 /* Else, update multicast address table */
857 else {
858 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
859 pr_debug("PROMISCUOUS MODE DISABLED\n");
860
861 // Reset all MC addresses
862 mac_clear_multicast(smc);
863 mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
864
865 if (dev->flags & IFF_ALLMULTI) {
866 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
867 pr_debug("ENABLE ALL MC ADDRESSES\n");
868 } else if (!netdev_mc_empty(dev)) {
869 if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
870 /* use exact filtering */
871
872 // point to first multicast addr
873 netdev_for_each_mc_addr(ha, dev) {
874 mac_add_multicast(smc,
875 (struct fddi_addr *)ha->addr,
876 1);
877
878 pr_debug("ENABLE MC ADDRESS: %pMF\n",
879 ha->addr);
880 }
881
882 } else { // more MC addresses than HW supports
883
884 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
885 pr_debug("ENABLE ALL MC ADDRESSES\n");
886 }
887 } else { // no MC addresses
888
889 pr_debug("DISABLE ALL MC ADDRESSES\n");
890 }
891
892 /* Update adapter filters */
893 mac_update_multicast(smc);
894 }
895 } // skfp_ctl_set_multicast_list_wo_lock
896
897
898 /*
899 * ===========================
900 * = skfp_ctl_set_mac_address =
901 * ===========================
902 *
903 * Overview:
904  *   Set a new MAC address on the adapter and update the dev_addr field in the device table.
905 *
906 * Returns:
907 * None
908 *
909 * Arguments:
910 * dev - pointer to device information
911 * addr - pointer to sockaddr structure containing unicast address to set
912 *
913 * Assumptions:
914 * The address pointed to by addr->sa_data is a valid unicast
915 * address and is presented in canonical (LSB) format.
916 */
917 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
918 {
919 struct s_smc *smc = netdev_priv(dev);
920 struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
921 skfddi_priv *bp = &smc->os;
922 unsigned long Flags;
923
924
925 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
926 spin_lock_irqsave(&bp->DriverLock, Flags);
927 ResetAdapter(smc);
928 spin_unlock_irqrestore(&bp->DriverLock, Flags);
929
930 return 0; /* always return zero */
931 } // skfp_ctl_set_mac_address
932
933
934 /*
935 * ==============
936 * = skfp_ioctl =
937 * ==============
938 *
939 * Overview:
940 *
941  * Perform IOCTL call functions here. Some are privileged operations and
942  * the caller's CAP_NET_ADMIN capability is checked in those cases.
943 *
944 * Returns:
945 * status value
946 * 0 - success
947 * other - failure
948 *
949 * Arguments:
950 * dev - pointer to device information
951 * rq - pointer to ioctl request structure
952 * cmd - ?
953 *
954 */
955
956
957 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
958 {
959 struct s_smc *smc = netdev_priv(dev);
960 skfddi_priv *lp = &smc->os;
961 struct s_skfp_ioctl ioc;
962 int status = 0;
963
964 if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
965 return -EFAULT;
966
967 switch (ioc.cmd) {
968 case SKFP_GET_STATS: /* Get the driver statistics */
969 ioc.len = sizeof(lp->MacStat);
970 status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
971 ? -EFAULT : 0;
972 break;
973 case SKFP_CLR_STATS: /* Zero out the driver statistics */
974 if (!capable(CAP_NET_ADMIN)) {
975 status = -EPERM;
976 } else {
977 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
978 }
979 break;
980 default:
981 printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
982 status = -EOPNOTSUPP;
983
984 } // switch
985
986 return status;
987 } // skfp_ioctl
988
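/*
 * Illustrative userspace sketch (not part of the driver): one way an
 * application might issue the SKFP_GET_STATS command that skfp_ioctl()
 * above handles.  The request reaches the driver through the device
 * private ioctl range (SIOCDEVPRIVATE); the layout of the request block
 * mirrors struct s_skfp_ioctl from the driver's private headers and is
 * an assumption here, as is the SKFP_GET_STATS value (0x05).
 */
#if 0	/* example only, never built as part of the driver */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

/* Userspace mirror of the driver's struct s_skfp_ioctl (assumed layout). */
struct skfp_ioctl_req {
	unsigned short cmd;	/* e.g. SKFP_GET_STATS (assumed to be 0x05) */
	unsigned short len;	/* size of the buffer behind 'data' */
	unsigned char *data;	/* buffer the driver fills with its MacStat */
};

static int skfp_get_stats_example(int sock, const char *ifname,
				  void *buf, unsigned short buflen)
{
	struct skfp_ioctl_req ioc = { .cmd = 0x05, .len = buflen, .data = buf };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &ioc;

	/* 'sock' is any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0); the
	 * buffer must be large enough for the driver's statistics block. */
	return ioctl(sock, SIOCDEVPRIVATE, &ifr);
}
#endif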
989
990 /*
991 * =====================
992 * = skfp_send_pkt =
993 * =====================
994 *
995 * Overview:
996  *   Queues a packet for transmission and tries to transmit it.
997 *
998 * Returns:
999 * Condition code
1000 *
1001 * Arguments:
1002 * skb - pointer to sk_buff to queue for transmission
1003 * dev - pointer to device information
1004 *
1005 * Functional Description:
1006 * Here we assume that an incoming skb transmit request
1007 * is contained in a single physically contiguous buffer
1008 * in which the virtual address of the start of packet
1009 * (skb->data) can be converted to a physical address
1010 * by using pci_map_single().
1011 *
1012 * We have an internal queue for packets we can not send
1013 * immediately. Packets in this queue can be given to the
1014 * adapter if transmit buffers are freed.
1015 *
1016 * We can't free the skb until after it's been DMA'd
1017 * out by the adapter, so we'll keep it in the driver and
1018 * return it in mac_drv_tx_complete.
1019 *
1020 * Return Codes:
1021 * 0 - driver has queued and/or sent packet
1022 * 1 - caller should requeue the sk_buff for later transmission
1023 *
1024 * Assumptions:
1025 * The entire packet is stored in one physically
1026 * contiguous buffer which is not cached and whose
1027 * 32-bit physical address can be determined.
1028 *
1029 * It's vital that this routine is NOT reentered for the
1030 * same board and that the OS is not in another section of
1031 * code (eg. skfp_interrupt) for the same board on a
1032 * different thread.
1033 *
1034 * Side Effects:
1035 * None
1036 */
1037 static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
1038 struct net_device *dev)
1039 {
1040 struct s_smc *smc = netdev_priv(dev);
1041 skfddi_priv *bp = &smc->os;
1042
1043 pr_debug("skfp_send_pkt\n");
1044
1045 /*
1046 * Verify that incoming transmit request is OK
1047 *
1048 * Note: The packet size check is consistent with other
1049 * Linux device drivers, although the correct packet
1050 * size should be verified before calling the
1051 * transmit routine.
1052 */
1053
1054 if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
1055 bp->MacStat.gen.tx_errors++; /* bump error counter */
1056 // dequeue packets from xmt queue and send them
1057 netif_start_queue(dev);
1058 dev_kfree_skb(skb);
1059 return NETDEV_TX_OK; /* return "success" */
1060 }
1061 if (bp->QueueSkb == 0) { // return with tbusy set: queue full
1062
1063 netif_stop_queue(dev);
1064 return NETDEV_TX_BUSY;
1065 }
1066 bp->QueueSkb--;
1067 skb_queue_tail(&bp->SendSkbQueue, skb);
1068 send_queued_packets(netdev_priv(dev));
1069 if (bp->QueueSkb == 0) {
1070 netif_stop_queue(dev);
1071 }
1072 return NETDEV_TX_OK;
1073
1074 } // skfp_send_pkt
1075
1076
1077 /*
1078 * =======================
1079 * = send_queued_packets =
1080 * =======================
1081 *
1082 * Overview:
1083 * Send packets from the driver queue as long as there are some and
1084 * transmit resources are available.
1085 *
1086 * Returns:
1087 * None
1088 *
1089 * Arguments:
1090 * smc - pointer to smc (adapter) structure
1091 *
1092 * Functional Description:
1093  *   Take a packet from the queue if there is one. If not, then we are done.
1094 * Check if there are resources to send the packet. If not, requeue it
1095 * and exit.
1096 * Set packet descriptor flags and give packet to adapter.
1097 * Check if any send resources can be freed (we do not use the
1098 * transmit complete interrupt).
1099 */
1100 static void send_queued_packets(struct s_smc *smc)
1101 {
1102 skfddi_priv *bp = &smc->os;
1103 struct sk_buff *skb;
1104 unsigned char fc;
1105 int queue;
1106 struct s_smt_fp_txd *txd; // Current TxD.
1107 dma_addr_t dma_address;
1108 unsigned long Flags;
1109
1110 int frame_status; // HWM tx frame status.
1111
1112 pr_debug("send queued packets\n");
1113 for (;;) {
1114 // send first buffer from queue
1115 skb = skb_dequeue(&bp->SendSkbQueue);
1116
1117 if (!skb) {
1118 pr_debug("queue empty\n");
1119 return;
1120 } // queue empty !
1121
1122 spin_lock_irqsave(&bp->DriverLock, Flags);
1123 fc = skb->data[0];
1124 queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
1125 #ifdef ESS
1126 // Check if the frame may/must be sent as a synchronous frame.
1127
1128 if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
1129 // It's an LLC frame.
1130 if (!smc->ess.sync_bw_available)
1131 fc &= ~FC_SYNC_BIT; // No bandwidth available.
1132
1133 else { // Bandwidth is available.
1134
1135 if (smc->mib.fddiESSSynchTxMode) {
1136 // Send as sync. frame.
1137 fc |= FC_SYNC_BIT;
1138 }
1139 }
1140 }
1141 #endif // ESS
1142 frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
1143
1144 if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
1145 // Unable to send the frame.
1146
1147 if ((frame_status & RING_DOWN) != 0) {
1148 // Ring is down.
1149 pr_debug("Tx attempt while ring down.\n");
1150 } else if ((frame_status & OUT_OF_TXD) != 0) {
1151 pr_debug("%s: out of TXDs.\n", bp->dev->name);
1152 } else {
1153 pr_debug("%s: out of transmit resources",
1154 bp->dev->name);
1155 }
1156
1157 // Note: We will retry the operation as soon as
1158 // transmit resources become available.
1159 skb_queue_head(&bp->SendSkbQueue, skb);
1160 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1161 return; // Packet has been queued.
1162
1163 } // if (unable to send frame)
1164
1165 bp->QueueSkb++; // one packet less in local queue
1166
1167 // source address in packet ?
1168 CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
1169
1170 txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
1171
1172 dma_address = pci_map_single(&bp->pdev, skb->data,
1173 skb->len, PCI_DMA_TODEVICE);
1174 if (frame_status & LAN_TX) {
1175 txd->txd_os.skb = skb; // save skb
1176 txd->txd_os.dma_addr = dma_address; // save dma mapping
1177 }
1178 hwm_tx_frag(smc, skb->data, dma_address, skb->len,
1179 frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
1180
1181 if (!(frame_status & LAN_TX)) { // local only frame
1182 pci_unmap_single(&bp->pdev, dma_address,
1183 skb->len, PCI_DMA_TODEVICE);
1184 dev_kfree_skb_irq(skb);
1185 }
1186 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1187 } // for
1188
1189 return; // never reached
1190
1191 } // send_queued_packets
1192
1193
1194 /************************
1195 *
1196 * CheckSourceAddress
1197 *
1198 * Verify if the source address is set. Insert it if necessary.
1199 *
1200 ************************/
1201 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1202 {
1203 unsigned char SRBit;
1204
1205 if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
1206
1207 return;
1208 if ((unsigned short) frame[1 + 10] != 0)
1209 return;
1210 SRBit = frame[1 + 6] & 0x01;
1211 memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
1212 frame[8] |= SRBit;
1213 } // CheckSourceAddress
1214
1215
1216 /************************
1217 *
1218 * ResetAdapter
1219 *
1220 * Reset the adapter and bring it back to operational mode.
1221 * Args
1222 * smc - A pointer to the SMT context struct.
1223 * Out
1224 * Nothing.
1225 *
1226 ************************/
1227 static void ResetAdapter(struct s_smc *smc)
1228 {
1229
1230 pr_debug("[fddi: ResetAdapter]\n");
1231
1232 // Stop the adapter.
1233
1234 card_stop(smc); // Stop all activity.
1235
1236 // Clear the transmit and receive descriptor queues.
1237 mac_drv_clear_tx_queue(smc);
1238 mac_drv_clear_rx_queue(smc);
1239
1240 // Restart the adapter.
1241
1242 smt_reset_defaults(smc, 1); // Initialize the SMT module.
1243
1244 init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
1245
1246 smt_online(smc, 1); // Insert into the ring again.
1247 STI_FBI();
1248
1249 // Restore original receive mode (multicasts, promiscuous, etc.).
1250 skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
1251 } // ResetAdapter
1252
1253
1254 //--------------- functions called by hardware module ----------------
1255
1256 /************************
1257 *
1258 * llc_restart_tx
1259 *
1260 * The hardware driver calls this routine when the transmit complete
1261 * interrupt bits (end of frame) for the synchronous or asynchronous
1262  *	queue are set.
1263 *
1264  * NOTE: The hardware driver also calls this function if no packets are queued.
1265 * The routine must be able to handle this case.
1266 * Args
1267 * smc - A pointer to the SMT context struct.
1268 * Out
1269 * Nothing.
1270 *
1271 ************************/
1272 void llc_restart_tx(struct s_smc *smc)
1273 {
1274 skfddi_priv *bp = &smc->os;
1275
1276 pr_debug("[llc_restart_tx]\n");
1277
1278 // Try to send queued packets
1279 spin_unlock(&bp->DriverLock);
1280 send_queued_packets(smc);
1281 spin_lock(&bp->DriverLock);
1282 netif_start_queue(bp->dev);// system may send again if it was blocked
1283
1284 } // llc_restart_tx
1285
1286
1287 /************************
1288 *
1289 * mac_drv_get_space
1290 *
1291 * The hardware module calls this function to allocate the memory
1292 * for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
1293 * Args
1294 * smc - A pointer to the SMT context struct.
1295 *
1296 * size - Size of memory in bytes to allocate.
1297 * Out
1298 * != 0 A pointer to the virtual address of the allocated memory.
1299 * == 0 Allocation error.
1300 *
1301 ************************/
1302 void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1303 {
1304 void *virt;
1305
1306 pr_debug("mac_drv_get_space (%d bytes), ", size);
1307 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1308
1309 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1310 printk("Unexpected SMT memory size requested: %d\n", size);
1311 return NULL;
1312 }
1313 smc->os.SharedMemHeap += size; // Move heap pointer.
1314
1315 pr_debug("mac_drv_get_space end\n");
1316 pr_debug("virt addr: %lx\n", (ulong) virt);
1317 pr_debug("bus addr: %lx\n", (ulong)
1318 (smc->os.SharedMemDMA +
1319 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1320 return virt;
1321 } // mac_drv_get_space
1322
1323
1324 /************************
1325 *
1326 * mac_drv_get_desc_mem
1327 *
1328 * This function is called by the hardware dependent module.
1329 * It allocates the memory for the RxD and TxD descriptors.
1330 *
1331 * This memory must be non-cached, non-movable and non-swappable.
1332 * This memory should start at a physical page boundary.
1333 * Args
1334 * smc - A pointer to the SMT context struct.
1335 *
1336 * size - Size of memory in bytes to allocate.
1337 * Out
1338 * != 0 A pointer to the virtual address of the allocated memory.
1339 * == 0 Allocation error.
1340 *
1341 ************************/
1342 void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1343 {
1344
1345 char *virt;
1346
1347 pr_debug("mac_drv_get_desc_mem\n");
1348
1349 // Descriptor memory must be aligned on 16-byte boundary.
1350
1351 virt = mac_drv_get_space(smc, size);
1352
1353 size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1354 size = size % 16;
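	/* Example: if 'virt' ends in an address whose low nibble is 0x4, the
	 * gap is 16 - 4 = 12 bytes; if 'virt' is already 16-byte aligned,
	 * 16 - 0 = 16 and the "% 16" reduces the gap to 0, so no padding
	 * bytes are consumed.
	 */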
1355
1356 pr_debug("Allocate %u bytes alignment gap ", size);
1357 pr_debug("for descriptor memory.\n");
1358
1359 if (!mac_drv_get_space(smc, size)) {
1360 printk("fddi: Unable to align descriptor memory.\n");
1361 return NULL;
1362 }
1363 return virt + size;
1364 } // mac_drv_get_desc_mem
1365
1366
1367 /************************
1368 *
1369 * mac_drv_virt2phys
1370 *
1371 * Get the physical address of a given virtual address.
1372 * Args
1373 * smc - A pointer to the SMT context struct.
1374 *
1375 * virt - A (virtual) pointer into our 'shared' memory area.
1376 * Out
1377 * Physical address of the given virtual address.
1378 *
1379 ************************/
1380 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1381 {
1382 return smc->os.SharedMemDMA +
1383 ((char *) virt - (char *)smc->os.SharedMemAddr);
1384 } // mac_drv_virt2phys
1385
1386
1387 /************************
1388 *
1389 * dma_master
1390 *
1391  *	The HWM calls this function when the driver performs a DMA
1392  *	transfer. If the OS-specific module must prepare the system hardware
1393 * for the DMA transfer, it should do it in this function.
1394 *
1395 * The hardware module calls this dma_master if it wants to send an SMT
1396 * frame. This means that the virt address passed in here is part of
1397 * the 'shared' memory area.
1398 * Args
1399 * smc - A pointer to the SMT context struct.
1400 *
1401 * virt - The virtual address of the data.
1402 *
1403 * len - The length in bytes of the data.
1404 *
1405 * flag - Indicates the transmit direction and the buffer type:
1406 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1407 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1408 * SMT_BUF (0x80) SMT buffer
1409 *
1410 * >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
1411 * Out
1412  *	Returns the physical address for the DMA transfer.
1413 *
1414 ************************/
1415 u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1416 {
1417 return smc->os.SharedMemDMA +
1418 ((char *) virt - (char *)smc->os.SharedMemAddr);
1419 } // dma_master
1420
1421
1422 /************************
1423 *
1424 * dma_complete
1425 *
1426 * The hardware module calls this routine when it has completed a DMA
1427 * transfer. If the operating system dependent module has set up the DMA
1428 * channel via dma_master() (e.g. Windows NT or AIX) it should clean up
1429 * the DMA channel.
1430 * Args
1431 * smc - A pointer to the SMT context struct.
1432 *
1433 * descr - A pointer to a TxD or RxD, respectively.
1434 *
1435 * flag - Indicates the DMA transfer direction / SMT buffer:
1436 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1437 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1438 * SMT_BUF (0x80) SMT buffer (managed by HWM)
1439 * Out
1440 * Nothing.
1441 *
1442 ************************/
1443 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
1444 {
1445 /* For TX buffers, there are two cases. If it is an SMT transmit
1446 * buffer, there is nothing to do since we use consistent memory
1447 * for the 'shared' memory area. The other case is for normal
1448 * transmit packets given to us by the networking stack, and in
1449 * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
1450 * below.
1451 *
1452 * For RX buffers, we have to unmap dynamic PCI DMA mappings here
1453 * because the hardware module is about to potentially look at
1454 * the contents of the buffer. If we did not call the PCI DMA
1455 * unmap first, the hardware module could read inconsistent data.
1456 */
1457 if (flag & DMA_WR) {
1458 skfddi_priv *bp = &smc->os;
1459 volatile struct s_smt_fp_rxd *r = &descr->r;
1460
1461 /* If SKB is NULL, we used the local buffer. */
1462 if (r->rxd_os.skb && r->rxd_os.dma_addr) {
1463 int MaxFrameSize = bp->MaxFrameSize;
1464
1465 pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
1466 MaxFrameSize, PCI_DMA_FROMDEVICE);
1467 r->rxd_os.dma_addr = 0;
1468 }
1469 }
1470 } // dma_complete
1471
1472
1473 /************************
1474 *
1475 * mac_drv_tx_complete
1476 *
1477 * Transmit of a packet is complete. Release the tx staging buffer.
1478 *
1479 * Args
1480 * smc - A pointer to the SMT context struct.
1481 *
1482 * txd - A pointer to the last TxD which is used by the frame.
1483 * Out
1484 * Returns nothing.
1485 *
1486 ************************/
1487 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1488 {
1489 struct sk_buff *skb;
1490
1491 pr_debug("entering mac_drv_tx_complete\n");
1492 // Check if this TxD points to a skb
1493
1494 if (!(skb = txd->txd_os.skb)) {
1495 pr_debug("TXD with no skb assigned.\n");
1496 return;
1497 }
1498 txd->txd_os.skb = NULL;
1499
1500 // release the DMA mapping
1501 pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1502 skb->len, PCI_DMA_TODEVICE);
1503 txd->txd_os.dma_addr = 0;
1504
1505 smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
1506 smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
1507
1508 // free the skb
1509 dev_kfree_skb_irq(skb);
1510
1511 pr_debug("leaving mac_drv_tx_complete\n");
1512 } // mac_drv_tx_complete
1513
1514
1515 /************************
1516 *
1517 * dump packets to logfile
1518 *
1519 ************************/
1520 #ifdef DUMPPACKETS
1521 void dump_data(unsigned char *Data, int length)
1522 {
1523 int i, j;
1524 unsigned char s[255], sh[10];
1525 if (length > 64) {
1526 length = 64;
1527 }
1528 printk(KERN_INFO "---Packet start---\n");
1529 for (i = 0, j = 0; i < length / 8; i++, j += 8)
1530 printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
1531 Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
1532 Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
1533 strcpy(s, "");
1534 for (i = 0; i < length % 8; i++) {
1535 sprintf(sh, "%02x ", Data[j + i]);
1536 strcat(s, sh);
1537 }
1538 printk(KERN_INFO "%s\n", s);
1539 printk(KERN_INFO "------------------\n");
1540 } // dump_data
1541 #else
1542 #define dump_data(data,len)
1543 #endif // DUMPPACKETS
1544
1545 /************************
1546 *
1547 * mac_drv_rx_complete
1548 *
1549 * The hardware module calls this function if an LLC frame is received
1550 * in a receive buffer. Also the SMT, NSA, and directed beacon frames
1551 * from the network will be passed to the LLC layer by this function
1552 * if passing is enabled.
1553 *
1554 * mac_drv_rx_complete forwards the frame to the LLC layer if it should
1555 * be received. It also fills the RxD ring with new receive buffers if
1556 * some can be queued.
1557 * Args
1558 * smc - A pointer to the SMT context struct.
1559 *
1560 * rxd - A pointer to the first RxD which is used by the receive frame.
1561 *
1562 * frag_count - Count of RxDs used by the received frame.
1563 *
1564 * len - Frame length.
1565 * Out
1566 * Nothing.
1567 *
1568 ************************/
1569 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1570 int frag_count, int len)
1571 {
1572 skfddi_priv *bp = &smc->os;
1573 struct sk_buff *skb;
1574 unsigned char *virt, *cp;
1575 unsigned short ri;
1576 u_int RifLength;
1577
1578 pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
1579 if (frag_count != 1) { // This is not allowed to happen.
1580
1581 printk("fddi: Multi-fragment receive!\n");
1582 goto RequeueRxd; // Re-use the given RXD(s).
1583
1584 }
1585 skb = rxd->rxd_os.skb;
1586 if (!skb) {
1587 pr_debug("No skb in rxd\n");
1588 smc->os.MacStat.gen.rx_errors++;
1589 goto RequeueRxd;
1590 }
1591 virt = skb->data;
1592
1593 // The DMA mapping was released in dma_complete above.
1594
1595 dump_data(skb->data, len);
1596
1597 /*
1598 * FDDI Frame format:
1599 * +-------+-------+-------+------------+--------+------------+
1600 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
1601 * +-------+-------+-------+------------+--------+------------+
1602 *
1603 * FC = Frame Control
1604 * DA = Destination Address
1605 * SA = Source Address
1606 * RIF = Routing Information Field
1607 * LLC = Logical Link Control
1608 */
1609
1610 // Remove Routing Information Field (RIF), if present.
1611
1612 if ((virt[1 + 6] & FDDI_RII) == 0)
1613 RifLength = 0;
1614 else {
1615 int n;
1616 // goos: RIF removal has still to be tested
1617 pr_debug("RIF found\n");
1618 // Get RIF length from Routing Control (RC) field.
1619 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
1620
1621 ri = ntohs(*((__be16 *) cp));
1622 RifLength = ri & FDDI_RCF_LEN_MASK;
1623 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
1624 printk("fddi: Invalid RIF.\n");
1625 goto RequeueRxd; // Discard the frame.
1626
1627 }
1628 virt[1 + 6] &= ~FDDI_RII; // Clear RII bit.
1629 // regions overlap
1630
1631 virt = cp + RifLength;
1632 for (n = FDDI_MAC_HDR_LEN; n; n--)
1633 *--virt = *--cp;
1634 		// adjust skb->data pointer
1635 skb_pull(skb, RifLength);
1636 len -= RifLength;
1637 RifLength = 0;
1638 }
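	/* Worked example (illustrative): if the SA has the RII bit set and
	 * the RC field encodes a RIF length of 6 bytes, the copy loop above
	 * moves the FDDI_MAC_HDR_LEN header bytes (FC + DA + SA) 6 positions
	 * towards the payload so that they overwrite the RIF, skb_pull()
	 * then advances skb->data onto the relocated header, and len shrinks
	 * by 6.
	 */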
1639
1640 // Count statistics.
1641 smc->os.MacStat.gen.rx_packets++; // Count indicated receive
1642 // packets.
1643 smc->os.MacStat.gen.rx_bytes+=len; // Count bytes.
1644
1645 // virt points to header again
1646 if (virt[1] & 0x01) { // Check group (multicast) bit.
1647
1648 smc->os.MacStat.gen.multicast++;
1649 }
1650
1651 // deliver frame to system
1652 rxd->rxd_os.skb = NULL;
1653 skb_trim(skb, len);
1654 skb->protocol = fddi_type_trans(skb, bp->dev);
1655
1656 netif_rx(skb);
1657
1658 HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
1659 return;
1660
1661 RequeueRxd:
1662 pr_debug("Rx: re-queue RXD.\n");
1663 mac_drv_requeue_rxd(smc, rxd, frag_count);
1664 smc->os.MacStat.gen.rx_errors++; // Count receive packets
1665 // not indicated.
1666
1667 } // mac_drv_rx_complete
1668
1669
1670 /************************
1671 *
1672 * mac_drv_requeue_rxd
1673 *
1674 * The hardware module calls this function to request the OS-specific
1675 * module to queue the receive buffer(s) represented by the pointer
1676 * to the RxD and the frag_count into the receive queue again. This
1677 * buffer was filled with an invalid frame or an SMT frame.
1678 * Args
1679 * smc - A pointer to the SMT context struct.
1680 *
1681 * rxd - A pointer to the first RxD which is used by the receive frame.
1682 *
1683 * frag_count - Count of RxDs used by the received frame.
1684 * Out
1685 * Nothing.
1686 *
1687 ************************/
1688 void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1689 int frag_count)
1690 {
1691 volatile struct s_smt_fp_rxd *next_rxd;
1692 volatile struct s_smt_fp_rxd *src_rxd;
1693 struct sk_buff *skb;
1694 int MaxFrameSize;
1695 unsigned char *v_addr;
1696 dma_addr_t b_addr;
1697
1698 if (frag_count != 1) // This is not allowed to happen.
1699
1700 printk("fddi: Multi-fragment requeue!\n");
1701
1702 MaxFrameSize = smc->os.MaxFrameSize;
1703 src_rxd = rxd;
1704 for (; frag_count > 0; frag_count--) {
1705 next_rxd = src_rxd->rxd_next;
1706 rxd = HWM_GET_CURR_RXD(smc);
1707
1708 skb = src_rxd->rxd_os.skb;
1709 if (skb == NULL) { // this should not happen
1710
1711 pr_debug("Requeue with no skb in rxd!\n");
1712 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1713 if (skb) {
1714 // we got a skb
1715 rxd->rxd_os.skb = skb;
1716 skb_reserve(skb, 3);
1717 skb_put(skb, MaxFrameSize);
1718 v_addr = skb->data;
1719 b_addr = pci_map_single(&smc->os.pdev,
1720 v_addr,
1721 MaxFrameSize,
1722 PCI_DMA_FROMDEVICE);
1723 rxd->rxd_os.dma_addr = b_addr;
1724 } else {
1725 // no skb available, use local buffer
1726 pr_debug("Queueing invalid buffer!\n");
1727 rxd->rxd_os.skb = NULL;
1728 v_addr = smc->os.LocalRxBuffer;
1729 b_addr = smc->os.LocalRxBufferDMA;
1730 }
1731 } else {
1732 // we use skb from old rxd
1733 rxd->rxd_os.skb = skb;
1734 v_addr = skb->data;
1735 b_addr = pci_map_single(&smc->os.pdev,
1736 v_addr,
1737 MaxFrameSize,
1738 PCI_DMA_FROMDEVICE);
1739 rxd->rxd_os.dma_addr = b_addr;
1740 }
1741 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1742 FIRST_FRAG | LAST_FRAG);
1743
1744 src_rxd = next_rxd;
1745 }
1746 } // mac_drv_requeue_rxd
1747
1748
1749 /************************
1750 *
1751 * mac_drv_fill_rxd
1752 *
1753 * The hardware module calls this function at initialization time
1754 * to fill the RxD ring with receive buffers. It is also called by
1755 * mac_drv_rx_complete if rx_free is large enough to queue some new
1756 * receive buffers into the RxD ring. mac_drv_fill_rxd queues new
1757 * receive buffers as long as enough RxDs and receive buffers are
1758 * available.
1759 * Args
1760 * smc - A pointer to the SMT context struct.
1761 * Out
1762 * Nothing.
1763 *
1764 ************************/
1765 void mac_drv_fill_rxd(struct s_smc *smc)
1766 {
1767 int MaxFrameSize;
1768 unsigned char *v_addr;
1769 unsigned long b_addr;
1770 struct sk_buff *skb;
1771 volatile struct s_smt_fp_rxd *rxd;
1772
1773 pr_debug("entering mac_drv_fill_rxd\n");
1774
1775 // Walk through the list of free receive buffers, passing receive
1776 // buffers to the HWM as long as RXDs are available.
1777
1778 MaxFrameSize = smc->os.MaxFrameSize;
1779 // Check if there is any RXD left.
1780 while (HWM_GET_RX_FREE(smc) > 0) {
1781 pr_debug(".\n");
1782
1783 rxd = HWM_GET_CURR_RXD(smc);
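// Allocate MaxFrameSize + 3 bytes and reserve 3 bytes of headroom;
// presumably so that the 13-byte FDDI MAC header fills out a 16-byte
// block and the LLC/IP payload behind it ends up better aligned.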
1784 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1785 if (skb) {
1786 // we got a skb
1787 skb_reserve(skb, 3);
1788 skb_put(skb, MaxFrameSize);
1789 v_addr = skb->data;
1790 b_addr = pci_map_single(&smc->os.pdev,
1791 v_addr,
1792 MaxFrameSize,
1793 PCI_DMA_FROMDEVICE);
1794 rxd->rxd_os.dma_addr = b_addr;
1795 } else {
1796 // no skb available, use local buffer
1797 // System has run out of buffer memory, but we want to
1798 // keep the receiver running in hope of better times.
1799 // Multiple descriptors may point to this local buffer,
1800 // so data in it must be considered invalid.
1801 pr_debug("Queueing invalid buffer!\n");
1802 v_addr = smc->os.LocalRxBuffer;
1803 b_addr = smc->os.LocalRxBufferDMA;
1804 }
1805
1806 rxd->rxd_os.skb = skb;
1807
1808 // Pass receive buffer to HWM.
1809 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1810 FIRST_FRAG | LAST_FRAG);
1811 }
1812 pr_debug("leaving mac_drv_fill_rxd\n");
1813 } // mac_drv_fill_rxd
1814
1815
1816 /************************
1817 *
1818 * mac_drv_clear_rxd
1819 *
1820 * The hardware module calls this function to release unused
1821 * receive buffers.
1822 * Args
1823 * smc - A pointer to the SMT context struct.
1824 *
1825 * rxd - A pointer to the first RxD which is used by the receive buffer.
1826 *
1827 * frag_count - Count of RxDs used by the receive buffer.
1828 * Out
1829 * Nothing.
1830 *
1831 ************************/
1832 void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1833 int frag_count)
1834 {
1835
1836 struct sk_buff *skb;
1837
1838 pr_debug("entering mac_drv_clear_rxd\n");
1839
1840 if (frag_count != 1) // This is not allowed to happen.
1841
1842 printk("fddi: Multi-fragment clear!\n");
1843
1844 for (; frag_count > 0; frag_count--) {
1845 skb = rxd->rxd_os.skb;
1846 if (skb != NULL) {
1847 skfddi_priv *bp = &smc->os;
1848 int MaxFrameSize = bp->MaxFrameSize;
1849
1850 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1851 MaxFrameSize, PCI_DMA_FROMDEVICE);
1852
1853 dev_kfree_skb(skb);
1854 rxd->rxd_os.skb = NULL;
1855 }
1856 rxd = rxd->rxd_next; // Next RXD.
1857
1858 }
1859 } // mac_drv_clear_rxd
1860
1861
1862 /************************
1863 *
1864 * mac_drv_rx_init
1865 *
1866 * The hardware module calls this routine when an SMT or NSA frame of the
1867 * local SMT should be delivered to the LLC layer.
1868 *
1869 * It is necessary to have this function, because there is no other way to
1870 * copy the contents of SMT MBufs into receive buffers.
1871 *
1872 * mac_drv_rx_init allocates an skb for the frame, copies the frame from
1873 * the lookahead buffer into it and passes it to the stack via netif_rx.
1874 * Args
1875 * smc - A pointer to the SMT context struct.
1876 *
1877 * len - The length (in bytes) of the received frame (FC, DA, SA, Data).
1878 *
1879 * fc - The Frame Control field of the received frame.
1880 *
1881 * look_ahead - A pointer to the lookahead data buffer (may be NULL).
1882 *
1883 * la_len - The length of the lookahead data stored in the lookahead
1884 * buffer (may be zero).
1885 * Out
1886 * Always returns zero (0).
1887 *
1888 ************************/
1889 int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1890 char *look_ahead, int la_len)
1891 {
1892 struct sk_buff *skb;
1893
1894 pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1895
1896 // "Received" a SMT or NSA frame of the local SMT.
1897
1898 if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
1899 pr_debug("fddi: Discard invalid local SMT frame\n");
1900 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1901 len, la_len, (unsigned long) look_ahead);
1902 return 0;
1903 }
1904 skb = alloc_skb(len + 3, GFP_ATOMIC);
1905 if (!skb) {
1906 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1907 return 0;
1908 }
1909 skb_reserve(skb, 3);
1910 skb_put(skb, len);
1911 skb_copy_to_linear_data(skb, look_ahead, len);
1912
1913 // deliver frame to system
1914 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1915 netif_rx(skb);
1916
1917 return 0;
1918 } // mac_drv_rx_init
1919
1920
1921 /************************
1922 *
1923 * smt_timer_poll
1924 *
1925 * This routine is called periodically by the SMT module to clean up the
1926 * driver.
1927 *
1928 * Return any queued frames back to the upper protocol layers if the ring
1929 * is down.
1930 * Args
1931 * smc - A pointer to the SMT context struct.
1932 * Out
1933 * Nothing.
1934 *
1935 ************************/
1936 void smt_timer_poll(struct s_smc *smc)
1937 {
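/* Nothing to do for this driver. */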
1938 } // smt_timer_poll
1939
1940
1941 /************************
1942 *
1943 * ring_status_indication
1944 *
1945 * This function indicates a change of the ring state.
1946 * Args
1947 * smc - A pointer to the SMT context struct.
1948 *
1949 * status - The current ring status.
1950 * Out
1951 * Nothing.
1952 *
1953 ************************/
1954 void ring_status_indication(struct s_smc *smc, u_long status)
1955 {
1956 pr_debug("ring_status_indication( ");
1957 if (status & RS_RES15)
1958 pr_debug("RS_RES15 ");
1959 if (status & RS_HARDERROR)
1960 pr_debug("RS_HARDERROR ");
1961 if (status & RS_SOFTERROR)
1962 pr_debug("RS_SOFTERROR ");
1963 if (status & RS_BEACON)
1964 pr_debug("RS_BEACON ");
1965 if (status & RS_PATHTEST)
1966 pr_debug("RS_PATHTEST ");
1967 if (status & RS_SELFTEST)
1968 pr_debug("RS_SELFTEST ");
1969 if (status & RS_RES9)
1970 pr_debug("RS_RES9 ");
1971 if (status & RS_DISCONNECT)
1972 pr_debug("RS_DISCONNECT ");
1973 if (status & RS_RES7)
1974 pr_debug("RS_RES7 ");
1975 if (status & RS_DUPADDR)
1976 pr_debug("RS_DUPADDR ");
1977 if (status & RS_NORINGOP)
1978 pr_debug("RS_NORINGOP ");
1979 if (status & RS_VERSION)
1980 pr_debug("RS_VERSION ");
1981 if (status & RS_STUCKBYPASSS)
1982 pr_debug("RS_STUCKBYPASSS ");
1983 if (status & RS_EVENT)
1984 pr_debug("RS_EVENT ");
1985 if (status & RS_RINGOPCHANGE)
1986 pr_debug("RS_RINGOPCHANGE ");
1987 if (status & RS_RES0)
1988 pr_debug("RS_RES0 ");
1989 pr_debug("]\n");
1990 } // ring_status_indication
1991
1992
1993 /************************
1994 *
1995 * smt_get_time
1996 *
1997 * Gets the current time from the system.
1998 * Args
1999 * None.
2000 * Out
2001 * The current time, measured in timer ticks (TICKS_PER_SECOND ticks
2002 * per second).
2003 *
2004 * TICKS_PER_SECOND is defined in "targetos.h"; its value must be
2005 * consistent with the time base returned by smt_get_time().
2006 *
2007 ************************/
2008 unsigned long smt_get_time(void)
2009 {
2010 return jiffies;
2011 } // smt_get_time
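/*
* Note: smt_get_time() returns jiffies, so TICKS_PER_SECOND in
* h/targetos.h is expected to equal HZ. A minimal sketch of how a
* millisecond timeout could be expressed in this time base
* (illustration only, not used by the driver):
*
* unsigned long deadline = smt_get_time() + msecs_to_jiffies(ms);
*
* if (time_after(jiffies, deadline))
* ; // timeout expired
*/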
2012
2013
2014 /************************
2015 *
2016 * smt_stat_counter
2017 *
2018 * Status counter update (ring_op, fifo full).
2019 * Args
2020 * smc - A pointer to the SMT context struct.
2021 *
2022 * stat - = 0: A ring operational change occurred.
2023 * = 1: The FORMAC FIFO buffer is full / FIFO overflow.
2024 * Out
2025 * Nothing.
2026 *
2027 ************************/
2028 void smt_stat_counter(struct s_smc *smc, int stat)
2029 {
2030 // BOOLEAN RingIsUp ;
2031
2032 pr_debug("smt_stat_counter\n");
2033 switch (stat) {
2034 case 0:
2035 pr_debug("Ring operational change.\n");
2036 break;
2037 case 1:
2038 pr_debug("Receive fifo overflow.\n");
2039 smc->os.MacStat.gen.rx_errors++;
2040 break;
2041 default:
2042 pr_debug("Unknown status (%d).\n", stat);
2043 break;
2044 }
2045 } // smt_stat_counter
2046
2047
2048 /************************
2049 *
2050 * cfm_state_change
2051 *
2052 * Sets CFM state in custom statistics.
2053 * Args
2054 * smc - A pointer to the SMT context struct.
2055 *
2056 * c_state - Possible values are:
2057 *
2058 * SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
2059 * SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
2060 * Out
2061 * Nothing.
2062 *
2063 ************************/
2064 void cfm_state_change(struct s_smc *smc, int c_state)
2065 {
2066 #ifdef DRIVERDEBUG
2067 char *s;
2068
2069 switch (c_state) {
2070 case SC0_ISOLATED:
2071 s = "SC0_ISOLATED";
2072 break;
2073 case SC1_WRAP_A:
2074 s = "SC1_WRAP_A";
2075 break;
2076 case SC2_WRAP_B:
2077 s = "SC2_WRAP_B";
2078 break;
2079 case SC4_THRU_A:
2080 s = "SC4_THRU_A";
2081 break;
2082 case SC5_THRU_B:
2083 s = "SC5_THRU_B";
2084 break;
2085 case SC7_WRAP_S:
2086 s = "SC7_WRAP_S";
2087 break;
2088 case SC9_C_WRAP_A:
2089 s = "SC9_C_WRAP_A";
2090 break;
2091 case SC10_C_WRAP_B:
2092 s = "SC10_C_WRAP_B";
2093 break;
2094 case SC11_C_WRAP_S:
2095 s = "SC11_C_WRAP_S";
2096 break;
2097 default:
2098 pr_debug("cfm_state_change: unknown %d\n", c_state);
2099 return;
2100 }
2101 pr_debug("cfm_state_change: %s\n", s);
2102 #endif // DRIVERDEBUG
2103 } // cfm_state_change
2104
2105
2106 /************************
2107 *
2108 * ecm_state_change
2109 *
2110 * Sets ECM state in custom statistics.
2111 * Args
2112 * smc - A pointer to the SMT context struct.
2113 *
2114 * e_state - Possible values are:
2115 *
2116 * EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
2117 * EC5_INSERT, EC6_CHECK, EC7_DEINSERT
2118 * Out
2119 * Nothing.
2120 *
2121 ************************/
2122 void ecm_state_change(struct s_smc *smc, int e_state)
2123 {
2124 #ifdef DRIVERDEBUG
2125 char *s;
2126
2127 switch (e_state) {
2128 case EC0_OUT:
2129 s = "EC0_OUT";
2130 break;
2131 case EC1_IN:
2132 s = "EC1_IN";
2133 break;
2134 case EC2_TRACE:
2135 s = "EC2_TRACE";
2136 break;
2137 case EC3_LEAVE:
2138 s = "EC3_LEAVE";
2139 break;
2140 case EC4_PATH_TEST:
2141 s = "EC4_PATH_TEST";
2142 break;
2143 case EC5_INSERT:
2144 s = "EC5_INSERT";
2145 break;
2146 case EC6_CHECK:
2147 s = "EC6_CHECK";
2148 break;
2149 case EC7_DEINSERT:
2150 s = "EC7_DEINSERT";
2151 break;
2152 default:
2153 s = "unknown";
2154 break;
2155 }
2156 pr_debug("ecm_state_change: %s\n", s);
2157 #endif //DRIVERDEBUG
2158 } // ecm_state_change
2159
2160
2161 /************************
2162 *
2163 * rmt_state_change
2164 *
2165 * Sets RMT state in custom statistics.
2166 * Args
2167 * smc - A pointer to the SMT context struct.
2168 *
2169 * r_state - Possible values are:
2170 *
2171 * RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
2172 * RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
2173 * Out
2174 * Nothing.
2175 *
2176 ************************/
2177 void rmt_state_change(struct s_smc *smc, int r_state)
2178 {
2179 #ifdef DRIVERDEBUG
2180 char *s;
2181
2182 switch (r_state) {
2183 case RM0_ISOLATED:
2184 s = "RM0_ISOLATED";
2185 break;
2186 case RM1_NON_OP:
2187 s = "RM1_NON_OP - not operational";
2188 break;
2189 case RM2_RING_OP:
2190 s = "RM2_RING_OP - ring operational";
2191 break;
2192 case RM3_DETECT:
2193 s = "RM3_DETECT - detect dupl addresses";
2194 break;
2195 case RM4_NON_OP_DUP:
2196 s = "RM4_NON_OP_DUP - dupl. addr detected";
2197 break;
2198 case RM5_RING_OP_DUP:
2199 s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
2200 break;
2201 case RM6_DIRECTED:
2202 s = "RM6_DIRECTED - sending directed beacons";
2203 break;
2204 case RM7_TRACE:
2205 s = "RM7_TRACE - trace initiated";
2206 break;
2207 default:
2208 s = "unknown";
2209 break;
2210 }
2211 pr_debug("[rmt_state_change: %s]\n", s);
2212 #endif // DRIVERDEBUG
2213 } // rmt_state_change
2214
2215
2216 /************************
2217 *
2218 * drv_reset_indication
2219 *
2220 * This function is called by the SMT when it has detected a severe
2221 * hardware problem. The driver should perform a reset on the adapter
2222 * as soon as possible, but not from within this function.
2223 * Args
2224 * smc - A pointer to the SMT context struct.
2225 * Out
2226 * Nothing.
2227 *
2228 ************************/
2229 void drv_reset_indication(struct s_smc *smc)
2230 {
2231 pr_debug("entering drv_reset_indication\n");
2232
2233 smc->os.ResetRequested = TRUE; // Set flag.
2234
2235 } // drv_reset_indication
2236
2237 static struct pci_driver skfddi_pci_driver = {
2238 .name = "skfddi",
2239 .id_table = skfddi_pci_tbl,
2240 .probe = skfp_init_one,
2241 .remove = skfp_remove_one,
2242 };
2243
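/*
* module_pci_driver() expands to roughly the classic module_init()/
* module_exit() pair sketched below (illustration only; the helper
* names are made up):
*
* static int __init skfddi_init_module(void)
* {
* return pci_register_driver(&skfddi_pci_driver);
* }
* module_init(skfddi_init_module);
*
* static void __exit skfddi_cleanup_module(void)
* {
* pci_unregister_driver(&skfddi_pci_driver);
* }
* module_exit(skfddi_cleanup_module);
*/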
2244 module_pci_driver(skfddi_pci_driver);