1 /*
2 * File Name:
3 * skfddi.c
4 *
5 * Copyright Information:
6 * Copyright SysKonnect 1998,1999.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * The information in this file is provided "AS IS" without warranty.
14 *
15 * Abstract:
16 * A Linux device driver supporting the SysKonnect FDDI PCI controller
17 * family.
18 *
19 * Maintainers:
20 * CG Christoph Goos (cgoos@syskonnect.de)
21 *
22 * Contributors:
23 * DM David S. Miller
24 *
25 * Address all questions to:
26 * linux@syskonnect.de
27 *
28 * The technical manual for the adapters is available from SysKonnect's
29 * web pages: www.syskonnect.com
30 * Go to "Support" and search the Knowledge Base for "manual".
31 *
32 * Driver Architecture:
33 * The driver architecture is based on the DEC FDDI driver by
34 * Lawrence V. Stefani and several ethernet drivers.
35 * I also used an existing Windows NT miniport driver.
36 * All hardware dependent functions are handled by the SysKonnect
37 * Hardware Module.
38 * The only header files that are directly related to this source
39 * are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
40 * The others belong to the SysKonnect FDDI Hardware Module and
41 * should not be changed.
42 *
43 * Modification History:
44 * Date Name Description
45 * 02-Mar-98 CG Created.
46 *
47 * 10-Mar-99 CG Support for 2.2.x added.
48 * 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
49 * 26-Oct-99 CG Fixed compilation error on 2.2.13
50 * 12-Nov-99 CG Source code release
51 * 22-Nov-99 CG Included in kernel source.
52 * 07-May-00 DM 64 bit fixes, new dma interface
53 * 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
54 * Daniele Bellucci <bellucda@tiscali.it>
55 * 03-Dec-03 SH Convert to PCI device model
56 *
57 * Compilation options (-Dxxx):
58 * DRIVERDEBUG print lots of messages to log file
59 * DUMPPACKETS print received/transmitted packets to logfile
60 *
61 * Tested cpu architectures:
62 * - i386
63 * - sparc64
64 */
65
66 /* Version information string - should be updated prior to */
67 /* each new release!!! */
68 #define VERSION "2.07"
69
70 static const char * const boot_msg =
71 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
72 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
73
74 /* Include files */
75
76 #include <linux/capability.h>
77 #include <linux/module.h>
78 #include <linux/kernel.h>
79 #include <linux/errno.h>
80 #include <linux/ioport.h>
81 #include <linux/interrupt.h>
82 #include <linux/pci.h>
83 #include <linux/netdevice.h>
84 #include <linux/fddidevice.h>
85 #include <linux/skbuff.h>
86 #include <linux/bitops.h>
87 #include <linux/gfp.h>
88
89 #include <asm/byteorder.h>
90 #include <asm/io.h>
91 #include <asm/uaccess.h>
92
93 #include "h/types.h"
94 #undef ADDR // undo Linux definition
95 #include "h/skfbi.h"
96 #include "h/fddi.h"
97 #include "h/smc.h"
98 #include "h/smtstate.h"
99
100
101 // Define module-wide (static) routines
102 static int skfp_driver_init(struct net_device *dev);
103 static int skfp_open(struct net_device *dev);
104 static int skfp_close(struct net_device *dev);
105 static irqreturn_t skfp_interrupt(int irq, void *dev_id);
106 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
107 static void skfp_ctl_set_multicast_list(struct net_device *dev);
108 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
109 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
110 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
111 static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
112 struct net_device *dev);
113 static void send_queued_packets(struct s_smc *smc);
114 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
115 static void ResetAdapter(struct s_smc *smc);
116
117
118 // Functions needed by the hardware module
119 void *mac_drv_get_space(struct s_smc *smc, u_int size);
120 void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
121 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
122 unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
123 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
124 int flag);
125 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
126 void llc_restart_tx(struct s_smc *smc);
127 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
128 int frag_count, int len);
129 void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
130 int frag_count);
131 void mac_drv_fill_rxd(struct s_smc *smc);
132 void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
133 int frag_count);
134 int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
135 int la_len);
136 void dump_data(unsigned char *Data, int length);
137
138 // External functions from the hardware module
139 extern u_int mac_drv_check_space(void);
140 extern int mac_drv_init(struct s_smc *smc);
141 extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
142 int len, int frame_status);
143 extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
144 int frame_len, int frame_status);
145 extern void fddi_isr(struct s_smc *smc);
146 extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
147 int len, int frame_status);
148 extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
149 extern void mac_drv_clear_rx_queue(struct s_smc *smc);
150 extern void enable_tx_irq(struct s_smc *smc, u_short queue);
151
152 static const struct pci_device_id skfddi_pci_tbl[] = {
153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
154 { } /* Terminating entry */
155 };
156 MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
157 MODULE_LICENSE("GPL");
158 MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
159
160 // Define module-wide (static) variables
161
162 static int num_boards; /* total number of adapters configured */
163
164 static const struct net_device_ops skfp_netdev_ops = {
165 .ndo_open = skfp_open,
166 .ndo_stop = skfp_close,
167 .ndo_start_xmit = skfp_send_pkt,
168 .ndo_get_stats = skfp_ctl_get_stats,
169 .ndo_change_mtu = fddi_change_mtu,
170 .ndo_set_rx_mode = skfp_ctl_set_multicast_list,
171 .ndo_set_mac_address = skfp_ctl_set_mac_address,
172 .ndo_do_ioctl = skfp_ioctl,
173 };
174
175 /*
176 * =================
177 * = skfp_init_one =
178 * =================
179 *
180 * Overview:
181 * Probes for supported FDDI PCI controllers
182 *
183 * Returns:
184 * Condition code
185 *
186 * Arguments:
187 * pdev - pointer to PCI device information
188 *
189 * Functional Description:
190 * This is now called by PCI driver registration process
191 * for each board found.
192 *
193 * Return Codes:
194 * 0 - This device (fddi0, fddi1, etc) configured successfully
195 * -ENODEV - No devices present, or no SysKonnect FDDI PCI device
196 * present for this device name
197 *
198 *
199 * Side Effects:
200 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
201 * initialized and the board resources are read and stored in
202 * the device structure.
203 */
204 static int skfp_init_one(struct pci_dev *pdev,
205 const struct pci_device_id *ent)
206 {
207 struct net_device *dev;
208 struct s_smc *smc; /* board pointer */
209 void __iomem *mem;
210 int err;
211
212 pr_debug("entering skfp_init_one\n");
213
214 if (num_boards == 0)
215 printk("%s\n", boot_msg);
216
217 err = pci_enable_device(pdev);
218 if (err)
219 return err;
220
221 err = pci_request_regions(pdev, "skfddi");
222 if (err)
223 goto err_out1;
224
225 pci_set_master(pdev);
226
227 #ifdef MEM_MAPPED_IO
228 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
229 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
230 err = -EIO;
231 goto err_out2;
232 }
233
234 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
235 #else
236 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
237 printk(KERN_ERR "skfp: region is not a PIO resource\n");
238 err = -EIO;
239 goto err_out2;
240 }
241
242 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
243 #endif
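/*
 * With MEM_MAPPED_IO the register window behind BAR 0 is ioremap()ed
 * above; otherwise the I/O port range behind BAR 1 is mapped with
 * ioport_map(). Either way the resulting cookie ends up in smc->hw.iop
 * below and is used for all register accesses.
 */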
244 if (!mem) {
245 printk(KERN_ERR "skfp: Unable to map register, "
246 "FDDI adapter will be disabled.\n");
247 err = -EIO;
248 goto err_out2;
249 }
250
251 dev = alloc_fddidev(sizeof(struct s_smc));
252 if (!dev) {
253 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
254 "FDDI adapter will be disabled.\n");
255 err = -ENOMEM;
256 goto err_out3;
257 }
258
259 dev->irq = pdev->irq;
260 dev->netdev_ops = &skfp_netdev_ops;
261
262 SET_NETDEV_DEV(dev, &pdev->dev);
263
264 /* Initialize board structure with bus-specific info */
265 smc = netdev_priv(dev);
266 smc->os.dev = dev;
267 smc->os.bus_type = SK_BUS_TYPE_PCI;
268 smc->os.pdev = *pdev;
269 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
270 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
271 smc->os.dev = dev;
272 smc->hw.slot = -1;
273 smc->hw.iop = mem;
274 smc->os.ResetRequested = FALSE;
275 skb_queue_head_init(&smc->os.SendSkbQueue);
276
277 dev->base_addr = (unsigned long)mem;
278
279 err = skfp_driver_init(dev);
280 if (err)
281 goto err_out4;
282
283 err = register_netdev(dev);
284 if (err)
285 goto err_out5;
286
287 ++num_boards;
288 pci_set_drvdata(pdev, dev);
289
290 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
291 (pdev->subsystem_device & 0xff00) == 0x5800)
292 printk("%s: SysKonnect FDDI PCI adapter"
293 " found (SK-%04X)\n", dev->name,
294 pdev->subsystem_device);
295 else
296 printk("%s: FDDI PCI adapter found\n", dev->name);
297
298 return 0;
299 err_out5:
300 if (smc->os.SharedMemAddr)
301 pci_free_consistent(pdev, smc->os.SharedMemSize,
302 smc->os.SharedMemAddr,
303 smc->os.SharedMemDMA);
304 pci_free_consistent(pdev, MAX_FRAME_SIZE,
305 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
306 err_out4:
307 free_netdev(dev);
308 err_out3:
309 #ifdef MEM_MAPPED_IO
310 iounmap(mem);
311 #else
312 ioport_unmap(mem);
313 #endif
314 err_out2:
315 pci_release_regions(pdev);
316 err_out1:
317 pci_disable_device(pdev);
318 return err;
319 }
320
321 /*
322 * Called for each adapter board from pci_unregister_driver
323 */
324 static void skfp_remove_one(struct pci_dev *pdev)
325 {
326 struct net_device *p = pci_get_drvdata(pdev);
327 struct s_smc *lp = netdev_priv(p);
328
329 unregister_netdev(p);
330
331 if (lp->os.SharedMemAddr) {
332 pci_free_consistent(&lp->os.pdev,
333 lp->os.SharedMemSize,
334 lp->os.SharedMemAddr,
335 lp->os.SharedMemDMA);
336 lp->os.SharedMemAddr = NULL;
337 }
338 if (lp->os.LocalRxBuffer) {
339 pci_free_consistent(&lp->os.pdev,
340 MAX_FRAME_SIZE,
341 lp->os.LocalRxBuffer,
342 lp->os.LocalRxBufferDMA);
343 lp->os.LocalRxBuffer = NULL;
344 }
345 #ifdef MEM_MAPPED_IO
346 iounmap(lp->hw.iop);
347 #else
348 ioport_unmap(lp->hw.iop);
349 #endif
350 pci_release_regions(pdev);
351 free_netdev(p);
352
353 pci_disable_device(pdev);
354 }
355
356 /*
357 * ====================
358 * = skfp_driver_init =
359 * ====================
360 *
361 * Overview:
362 * Initializes remaining adapter board structure information
363 * and makes sure adapter is in a safe state prior to skfp_open().
364 *
365 * Returns:
366 * Condition code
367 *
368 * Arguments:
369 * dev - pointer to device information
370 *
371 * Functional Description:
372 * This function allocates additional resources such as the host memory
373 * blocks needed by the adapter.
374 * The adapter is also reset. The OS must call skfp_open() to open
375 * the adapter and bring it on-line.
376 *
377 * Return Codes:
378 * 0 - initialization succeeded
379 * -1 - initialization failed
380 */
381 static int skfp_driver_init(struct net_device *dev)
382 {
383 struct s_smc *smc = netdev_priv(dev);
384 skfddi_priv *bp = &smc->os;
385 int err = -EIO;
386
387 pr_debug("entering skfp_driver_init\n");
388
389 // set the io address in private structures
390 bp->base_addr = dev->base_addr;
391
392 // Get the interrupt level from the PCI Configuration Table
393 smc->hw.irq = dev->irq;
394
395 spin_lock_init(&bp->DriverLock);
396
397 // Allocate invalid frame
398 bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA);
399 if (!bp->LocalRxBuffer) {
400 printk("could not allocate mem for ");
401 printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
402 goto fail;
403 }
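// LocalRxBuffer is a consistent-memory fallback buffer: it is handed to
// the adapter whenever no skb can be allocated for receive (see
// mac_drv_fill_rxd()), and anything received into it is discarded.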
404
405 // Determine the required size of the 'shared' memory area.
406 bp->SharedMemSize = mac_drv_check_space();
407 pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
408 if (bp->SharedMemSize > 0) {
409 bp->SharedMemSize += 16; // for descriptor alignment
410
411 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
412 bp->SharedMemSize,
413 &bp->SharedMemDMA);
414 if (!bp->SharedMemAddr) {
415 printk("could not allocate mem for ");
416 printk("hardware module: %ld byte\n",
417 bp->SharedMemSize);
418 goto fail;
419 }
420 bp->SharedMemHeap = 0; // Nothing used yet.
421
422 } else {
423 bp->SharedMemAddr = NULL;
424 bp->SharedMemHeap = 0;
425 } // SharedMemSize > 0
426
427 memset(bp->SharedMemAddr, 0, bp->SharedMemSize);
428
429 card_stop(smc); // Reset adapter.
430
431 pr_debug("mac_drv_init()..\n");
432 if (mac_drv_init(smc) != 0) {
433 pr_debug("mac_drv_init() failed\n");
434 goto fail;
435 }
436 read_address(smc, NULL);
437 pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
438 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
439
440 smt_reset_defaults(smc, 0);
441
442 return 0;
443
444 fail:
445 if (bp->SharedMemAddr) {
446 pci_free_consistent(&bp->pdev,
447 bp->SharedMemSize,
448 bp->SharedMemAddr,
449 bp->SharedMemDMA);
450 bp->SharedMemAddr = NULL;
451 }
452 if (bp->LocalRxBuffer) {
453 pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE,
454 bp->LocalRxBuffer, bp->LocalRxBufferDMA);
455 bp->LocalRxBuffer = NULL;
456 }
457 return err;
458 } // skfp_driver_init
459
460
461 /*
462 * =============
463 * = skfp_open =
464 * =============
465 *
466 * Overview:
467 * Opens the adapter
468 *
469 * Returns:
470 * Condition code
471 *
472 * Arguments:
473 * dev - pointer to device information
474 *
475 * Functional Description:
476 * This function brings the adapter to an operational state.
477 *
478 * Return Codes:
479 * 0 - Adapter was successfully opened
480 * -EAGAIN - Could not register IRQ
481 */
482 static int skfp_open(struct net_device *dev)
483 {
484 struct s_smc *smc = netdev_priv(dev);
485 int err;
486
487 pr_debug("entering skfp_open\n");
488 /* Register IRQ - support shared interrupts by passing device ptr */
489 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
490 dev->name, dev);
491 if (err)
492 return err;
493
494 /*
495 * Set current address to factory MAC address
496 *
497 * Note: We've already done this step in skfp_driver_init.
498 * However, it's possible that a user has set a node
499 * address override, then closed and reopened the
500 * adapter. Unless we reset the device address field
501 * now, we'll continue to use the existing modified
502 * address.
503 */
504 read_address(smc, NULL);
505 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
506
507 init_smt(smc, NULL);
508 smt_online(smc, 1);
509 STI_FBI();
510
511 /* Clear local multicast address tables */
512 mac_clear_multicast(smc);
513
514 /* Disable promiscuous filter settings */
515 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
516
517 netif_start_queue(dev);
518 return 0;
519 } // skfp_open
520
521
522 /*
523 * ==============
524 * = skfp_close =
525 * ==============
526 *
527 * Overview:
528 * Closes the device/module.
529 *
530 * Returns:
531 * Condition code
532 *
533 * Arguments:
534 * dev - pointer to device information
535 *
536 * Functional Description:
537 * This routine closes the adapter and brings it to a safe state.
538 * The interrupt service routine is deregistered with the OS.
539 * The adapter can be opened again with another call to skfp_open().
540 *
541 * Return Codes:
542 * Always return 0.
543 *
544 * Assumptions:
545 * No further requests for this adapter are made after this routine is
546 * called. skfp_open() can be called to reset and reinitialize the
547 * adapter.
548 */
549 static int skfp_close(struct net_device *dev)
550 {
551 struct s_smc *smc = netdev_priv(dev);
552 skfddi_priv *bp = &smc->os;
553
554 CLI_FBI();
555 smt_reset_defaults(smc, 1);
556 card_stop(smc);
557 mac_drv_clear_tx_queue(smc);
558 mac_drv_clear_rx_queue(smc);
559
560 netif_stop_queue(dev);
561 /* Deregister (free) IRQ */
562 free_irq(dev->irq, dev);
563
564 skb_queue_purge(&bp->SendSkbQueue);
565 bp->QueueSkb = MAX_TX_QUEUE_LEN;
566
567 return 0;
568 } // skfp_close
569
570
571 /*
572 * ==================
573 * = skfp_interrupt =
574 * ==================
575 *
576 * Overview:
577 * Interrupt processing routine
578 *
579 * Returns:
580 * None
581 *
582 * Arguments:
583 * irq - interrupt vector
584 * dev_id - pointer to device information
585 *
586 * Functional Description:
587 * This routine calls the interrupt processing routine for this adapter. It
588 * disables and reenables adapter interrupts, as appropriate. We can support
589 * shared interrupts since the incoming dev_id pointer provides our device
590 * structure context. All the real work is done in the hardware module.
591 *
592 * Return Codes:
593 * None
594 *
595 * Assumptions:
596 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
597 * on Intel-based systems) is done by the operating system outside this
598 * routine.
599 *
600 * System interrupts are enabled through this call.
601 *
602 * Side Effects:
603 * Interrupts are disabled, then reenabled at the adapter.
604 */
605
606 static irqreturn_t skfp_interrupt(int irq, void *dev_id)
607 {
608 struct net_device *dev = dev_id;
609 struct s_smc *smc; /* private board structure pointer */
610 skfddi_priv *bp;
611
612 smc = netdev_priv(dev);
613 bp = &smc->os;
614
615 // IRQs enabled or disabled ?
616 if (inpd(ADDR(B0_IMSK)) == 0) {
617 // IRQs are disabled: must be shared interrupt
618 return IRQ_NONE;
619 }
620 // Note: At this point, IRQs are enabled.
621 if ((inpd(ISR_A) & smc->hw.is_imask) == 0) { // IRQ?
622 // Adapter did not issue an IRQ: must be shared interrupt
623 return IRQ_NONE;
624 }
625 CLI_FBI(); // Disable IRQs from our adapter.
626 spin_lock(&bp->DriverLock);
627
628 // Call interrupt handler in hardware module (HWM).
629 fddi_isr(smc);
630
631 if (smc->os.ResetRequested) {
632 ResetAdapter(smc);
633 smc->os.ResetRequested = FALSE;
634 }
635 spin_unlock(&bp->DriverLock);
636 STI_FBI(); // Enable IRQs from our adapter.
637
638 return IRQ_HANDLED;
639 } // skfp_interrupt
640
641
642 /*
643 * ======================
644 * = skfp_ctl_get_stats =
645 * ======================
646 *
647 * Overview:
648 * Get statistics for FDDI adapter
649 *
650 * Returns:
651 * Pointer to FDDI statistics structure
652 *
653 * Arguments:
654 * dev - pointer to device information
655 *
656 * Functional Description:
657 * Gets current MIB objects from adapter, then
658 * returns FDDI statistics structure as defined
659 * in if_fddi.h.
660 *
661 * Note: Since the FDDI statistics structure is
662 * still new and the device structure doesn't
663 * have an FDDI-specific get statistics handler,
664 * we'll return the FDDI statistics structure as
665 * a pointer to an Ethernet statistics structure.
666 * That way, at least the first part of the statistics
667 * structure can be decoded properly.
668 * We'll have to pay attention to this routine as the
669 * device structure becomes more mature and LAN media
670 * independent.
671 *
672 */
673 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
674 {
675 struct s_smc *bp = netdev_priv(dev);
676
677 /* Fill the bp->stats structure with driver-maintained counters */
678
679 bp->os.MacStat.port_bs_flag[0] = 0x1234;
680 bp->os.MacStat.port_bs_flag[1] = 0x5678;
681 // goos: need to fill out fddi statistic
682 #if 0
683 /* Get FDDI SMT MIB objects */
684
685 /* Fill the bp->stats structure with the SMT MIB object values */
686
687 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
688 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
689 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
690 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
691 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
692 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
693 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
694 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
695 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
696 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
697 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
698 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
699 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
700 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
701 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
702 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
703 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
704 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
705 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
706 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
707 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
708 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
709 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
710 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
711 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
712 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
713 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
714 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
715 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
716 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
717 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
718 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
719 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
720 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
721 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
722 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
723 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
724 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
725 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
726 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
727 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
728 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
729 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
730 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
731 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
732 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
733 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
734 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
735 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
736 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
737 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
738 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
739 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
740 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
741 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
742 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
743 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
744 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
745 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
746 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
747 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
748 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
749 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
750 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
751 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
752 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
753 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
754 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
755 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
756 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
757 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
758 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
759 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
760 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
761 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
762 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
763 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
764 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
765 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
766 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
767 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
768 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
769 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
770 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
771 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
772 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
773 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
774 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
775 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
776 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
777 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
778 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
779
780
781 /* Fill the bp->stats structure with the FDDI counter values */
782
783 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
784 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
785 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
786 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
787 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
788 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
789 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
790 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
791 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
792 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
793 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
794
795 #endif
796 return (struct net_device_stats *)&bp->os.MacStat;
797 } // ctl_get_stat
798
799
800 /*
801 * ==============================
802 * = skfp_ctl_set_multicast_list =
803 * ==============================
804 *
805 * Overview:
806 * Enable/Disable LLC frame promiscuous mode reception
807 * on the adapter and/or update multicast address table.
808 *
809 * Returns:
810 * None
811 *
812 * Arguments:
813 * dev - pointer to device information
814 *
815 * Functional Description:
816 * This function acquires the driver lock and only calls
817 * skfp_ctl_set_multicast_list_wo_lock then.
818 * This routine follows a fairly simple algorithm for setting the
819 * adapter filters and CAM:
820 *
821 * if IFF_PROMISC flag is set
822 * enable promiscuous mode
823 * else
824 * disable promiscuous mode
825 * if number of multicast addresses <= max. multicast number
826 * add mc addresses to adapter table
827 * else
828 * enable promiscuous mode
829 * update adapter filters
830 *
831 * Assumptions:
832 * Multicast addresses are presented in canonical (LSB) format.
833 *
834 * Side Effects:
835 * On-board adapter filters are updated.
836 */
837 static void skfp_ctl_set_multicast_list(struct net_device *dev)
838 {
839 struct s_smc *smc = netdev_priv(dev);
840 skfddi_priv *bp = &smc->os;
841 unsigned long Flags;
842
843 spin_lock_irqsave(&bp->DriverLock, Flags);
844 skfp_ctl_set_multicast_list_wo_lock(dev);
845 spin_unlock_irqrestore(&bp->DriverLock, Flags);
846 } // skfp_ctl_set_multicast_list
847
848
849
850 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
851 {
852 struct s_smc *smc = netdev_priv(dev);
853 struct netdev_hw_addr *ha;
854
855 /* Enable promiscuous mode, if necessary */
856 if (dev->flags & IFF_PROMISC) {
857 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
858 pr_debug("PROMISCUOUS MODE ENABLED\n");
859 }
860 /* Else, update multicast address table */
861 else {
862 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
863 pr_debug("PROMISCUOUS MODE DISABLED\n");
864
865 // Reset all MC addresses
866 mac_clear_multicast(smc);
867 mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
868
869 if (dev->flags & IFF_ALLMULTI) {
870 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
871 pr_debug("ENABLE ALL MC ADDRESSES\n");
872 } else if (!netdev_mc_empty(dev)) {
873 if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
874 /* use exact filtering */
875
876 // point to first multicast addr
877 netdev_for_each_mc_addr(ha, dev) {
878 mac_add_multicast(smc,
879 (struct fddi_addr *)ha->addr,
880 1);
881
882 pr_debug("ENABLE MC ADDRESS: %pMF\n",
883 ha->addr);
884 }
885
886 } else { // more MC addresses than HW supports
887
888 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
889 pr_debug("ENABLE ALL MC ADDRESSES\n");
890 }
891 } else { // no MC addresses
892
893 pr_debug("DISABLE ALL MC ADDRESSES\n");
894 }
895
896 /* Update adapter filters */
897 mac_update_multicast(smc);
898 }
899 } // skfp_ctl_set_multicast_list_wo_lock
900
901
902 /*
903 * ===========================
904 * = skfp_ctl_set_mac_address =
905 * ===========================
906 *
907 * Overview:
908 * set new mac address on adapter and update dev_addr field in device table.
909 *
910 * Returns:
911 * None
912 *
913 * Arguments:
914 * dev - pointer to device information
915 * addr - pointer to sockaddr structure containing unicast address to set
916 *
917 * Assumptions:
918 * The address pointed to by addr->sa_data is a valid unicast
919 * address and is presented in canonical (LSB) format.
920 */
921 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
922 {
923 struct s_smc *smc = netdev_priv(dev);
924 struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
925 skfddi_priv *bp = &smc->os;
926 unsigned long Flags;
927
928
929 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
930 spin_lock_irqsave(&bp->DriverLock, Flags);
931 ResetAdapter(smc);
932 spin_unlock_irqrestore(&bp->DriverLock, Flags);
933
934 return 0; /* always return zero */
935 } // skfp_ctl_set_mac_address
936
937
938 /*
939 * ==============
940 * = skfp_ioctl =
941 * ==============
942 *
943 * Overview:
944 *
945 * Perform IOCTL call functions here. Some are privileged operations and
946 * the CAP_NET_ADMIN capability is checked in those cases.
947 *
948 * Returns:
949 * status value
950 * 0 - success
951 * other - failure
952 *
953 * Arguments:
954 * dev - pointer to device information
955 * rq - pointer to ioctl request structure
956 * cmd - ioctl command code (unused; the actual command is taken from the s_skfp_ioctl request data)
957 *
958 */
959
960
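/*
 * A minimal, hypothetical user-space sketch of this private ioctl
 * interface (illustrative only; it assumes the request is issued as a
 * SIOCDEVPRIVATE device ioctl and that struct s_skfp_ioctl with its
 * cmd/len/data members is visible to the application):
 *
 *	struct s_skfp_ioctl ioc;
 *	struct ifreq ifr;
 *	char buf[8192];				// assumed large enough
 *
 *	ioc.cmd  = SKFP_GET_STATS;		// or SKFP_CLR_STATS
 *	ioc.len  = sizeof(buf);
 *	ioc.data = buf;
 *	strncpy(ifr.ifr_name, "fddi0", IFNAMSIZ);
 *	ifr.ifr_data = (char *) &ioc;
 *	ioctl(sockfd, SIOCDEVPRIVATE, &ifr);	// sockfd: any AF_INET socket
 */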
961 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
962 {
963 struct s_smc *smc = netdev_priv(dev);
964 skfddi_priv *lp = &smc->os;
965 struct s_skfp_ioctl ioc;
966 int status = 0;
967
968 if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
969 return -EFAULT;
970
971 switch (ioc.cmd) {
972 case SKFP_GET_STATS: /* Get the driver statistics */
973 ioc.len = sizeof(lp->MacStat);
974 status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
975 ? -EFAULT : 0;
976 break;
977 case SKFP_CLR_STATS: /* Zero out the driver statistics */
978 if (!capable(CAP_NET_ADMIN)) {
979 status = -EPERM;
980 } else {
981 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
982 }
983 break;
984 default:
985 printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
986 status = -EOPNOTSUPP;
987
988 } // switch
989
990 return status;
991 } // skfp_ioctl
992
993
994 /*
995 * =====================
996 * = skfp_send_pkt =
997 * =====================
998 *
999 * Overview:
1000 * Queues a packet for transmission and tries to transmit it.
1001 *
1002 * Returns:
1003 * Condition code
1004 *
1005 * Arguments:
1006 * skb - pointer to sk_buff to queue for transmission
1007 * dev - pointer to device information
1008 *
1009 * Functional Description:
1010 * Here we assume that an incoming skb transmit request
1011 * is contained in a single physically contiguous buffer
1012 * in which the virtual address of the start of packet
1013 * (skb->data) can be converted to a physical address
1014 * by using pci_map_single().
1015 *
1016 * We have an internal queue for packets we cannot send
1017 * immediately. Packets in this queue can be given to the
1018 * adapter if transmit buffers are freed.
1019 *
1020 * We can't free the skb until after it's been DMA'd
1021 * out by the adapter, so we'll keep it in the driver and
1022 * return it in mac_drv_tx_complete.
1023 *
1024 * Return Codes:
1025 * NETDEV_TX_OK - driver has queued and/or sent packet
1026 * NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
1027 *
1028 * Assumptions:
1029 * The entire packet is stored in one physically
1030 * contiguous buffer which is not cached and whose
1031 * 32-bit physical address can be determined.
1032 *
1033 * It's vital that this routine is NOT reentered for the
1034 * same board and that the OS is not in another section of
1035 * code (eg. skfp_interrupt) for the same board on a
1036 * different thread.
1037 *
1038 * Side Effects:
1039 * None
1040 */
1041 static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
1042 struct net_device *dev)
1043 {
1044 struct s_smc *smc = netdev_priv(dev);
1045 skfddi_priv *bp = &smc->os;
1046
1047 pr_debug("skfp_send_pkt\n");
1048
1049 /*
1050 * Verify that incoming transmit request is OK
1051 *
1052 * Note: The packet size check is consistent with other
1053 * Linux device drivers, although the correct packet
1054 * size should be verified before calling the
1055 * transmit routine.
1056 */
1057
1058 if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
1059 bp->MacStat.gen.tx_errors++; /* bump error counter */
1060 // wake the netif queue so the stack keeps sending
1061 netif_start_queue(dev);
1062 dev_kfree_skb(skb);
1063 return NETDEV_TX_OK; /* return "success" */
1064 }
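/*
 * QueueSkb counts the free slots in the driver's private transmit
 * queue: it starts at MAX_TX_QUEUE_LEN, is decremented when a packet
 * is queued below and incremented again in send_queued_packets() once
 * the packet has been given to the adapter.
 */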
1065 if (bp->QueueSkb == 0) { // return with tbusy set: queue full
1066
1067 netif_stop_queue(dev);
1068 return NETDEV_TX_BUSY;
1069 }
1070 bp->QueueSkb--;
1071 skb_queue_tail(&bp->SendSkbQueue, skb);
1072 send_queued_packets(netdev_priv(dev));
1073 if (bp->QueueSkb == 0) {
1074 netif_stop_queue(dev);
1075 }
1076 return NETDEV_TX_OK;
1077
1078 } // skfp_send_pkt
1079
1080
1081 /*
1082 * =======================
1083 * = send_queued_packets =
1084 * =======================
1085 *
1086 * Overview:
1087 * Send packets from the driver queue as long as there are some and
1088 * transmit resources are available.
1089 *
1090 * Returns:
1091 * None
1092 *
1093 * Arguments:
1094 * smc - pointer to smc (adapter) structure
1095 *
1096 * Functional Description:
1097 * Take a packet from queue if there is any. If not, then we are done.
1098 * Check if there are resources to send the packet. If not, requeue it
1099 * and exit.
1100 * Set packet descriptor flags and give packet to adapter.
1101 * Check if any send resources can be freed (we do not use the
1102 * transmit complete interrupt).
1103 */
1104 static void send_queued_packets(struct s_smc *smc)
1105 {
1106 skfddi_priv *bp = &smc->os;
1107 struct sk_buff *skb;
1108 unsigned char fc;
1109 int queue;
1110 struct s_smt_fp_txd *txd; // Current TxD.
1111 dma_addr_t dma_address;
1112 unsigned long Flags;
1113
1114 int frame_status; // HWM tx frame status.
1115
1116 pr_debug("send queued packets\n");
1117 for (;;) {
1118 // send first buffer from queue
1119 skb = skb_dequeue(&bp->SendSkbQueue);
1120
1121 if (!skb) {
1122 pr_debug("queue empty\n");
1123 return;
1124 } // queue empty !
1125
1126 spin_lock_irqsave(&bp->DriverLock, Flags);
1127 fc = skb->data[0];
1128 queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
1129 #ifdef ESS
1130 // Check if the frame may/must be sent as a synchronous frame.
1131
1132 if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
1133 // It's an LLC frame.
1134 if (!smc->ess.sync_bw_available)
1135 fc &= ~FC_SYNC_BIT; // No bandwidth available.
1136
1137 else { // Bandwidth is available.
1138
1139 if (smc->mib.fddiESSSynchTxMode) {
1140 // Send as sync. frame.
1141 fc |= FC_SYNC_BIT;
1142 }
1143 }
1144 }
1145 #endif // ESS
1146 frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
1147
1148 if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
1149 // Unable to send the frame.
1150
1151 if ((frame_status & RING_DOWN) != 0) {
1152 // Ring is down.
1153 pr_debug("Tx attempt while ring down.\n");
1154 } else if ((frame_status & OUT_OF_TXD) != 0) {
1155 pr_debug("%s: out of TXDs.\n", bp->dev->name);
1156 } else {
1157 pr_debug("%s: out of transmit resources",
1158 bp->dev->name);
1159 }
1160
1161 // Note: We will retry the operation as soon as
1162 // transmit resources become available.
1163 skb_queue_head(&bp->SendSkbQueue, skb);
1164 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1165 return; // Packet has been queued.
1166
1167 } // if (unable to send frame)
1168
1169 bp->QueueSkb++; // one packet less in local queue
1170
1171 // source address in packet ?
1172 CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
1173
1174 txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
1175
1176 dma_address = pci_map_single(&bp->pdev, skb->data,
1177 skb->len, PCI_DMA_TODEVICE);
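// The streaming mapping set up above stays in place while the adapter
// DMAs the frame: for LAN frames it is released in mac_drv_tx_complete(),
// for local-only frames it is unmapped right below.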
1178 if (frame_status & LAN_TX) {
1179 txd->txd_os.skb = skb; // save skb
1180 txd->txd_os.dma_addr = dma_address; // save dma mapping
1181 }
1182 hwm_tx_frag(smc, skb->data, dma_address, skb->len,
1183 frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
1184
1185 if (!(frame_status & LAN_TX)) { // local only frame
1186 pci_unmap_single(&bp->pdev, dma_address,
1187 skb->len, PCI_DMA_TODEVICE);
1188 dev_kfree_skb_irq(skb);
1189 }
1190 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1191 } // for
1192
1193 return; // never reached
1194
1195 } // send_queued_packets
1196
1197
1198 /************************
1199 *
1200 * CheckSourceAddress
1201 *
1202 * Verify if the source address is set. Insert it if necessary.
1203 *
1204 ************************/
1205 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1206 {
1207 unsigned char SRBit;
1208
1209 if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // SA already set (ignore routing bit)
1210
1211 return;
1212 if ((unsigned short) frame[1 + 10] != 0)
1213 return;
1214 SRBit = frame[1 + 6] & 0x01;
1215 memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
1216 frame[8] |= SRBit;
1217 } // CheckSourceAddress
1218
1219
1220 /************************
1221 *
1222 * ResetAdapter
1223 *
1224 * Reset the adapter and bring it back to operational mode.
1225 * Args
1226 * smc - A pointer to the SMT context struct.
1227 * Out
1228 * Nothing.
1229 *
1230 ************************/
1231 static void ResetAdapter(struct s_smc *smc)
1232 {
1233
1234 pr_debug("[fddi: ResetAdapter]\n");
1235
1236 // Stop the adapter.
1237
1238 card_stop(smc); // Stop all activity.
1239
1240 // Clear the transmit and receive descriptor queues.
1241 mac_drv_clear_tx_queue(smc);
1242 mac_drv_clear_rx_queue(smc);
1243
1244 // Restart the adapter.
1245
1246 smt_reset_defaults(smc, 1); // Initialize the SMT module.
1247
1248 init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
1249
1250 smt_online(smc, 1); // Insert into the ring again.
1251 STI_FBI();
1252
1253 // Restore original receive mode (multicasts, promiscuous, etc.).
1254 skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
1255 } // ResetAdapter
1256
1257
1258 //--------------- functions called by hardware module ----------------
1259
1260 /************************
1261 *
1262 * llc_restart_tx
1263 *
1264 * The hardware driver calls this routine when the transmit complete
1265 * interrupt bits (end of frame) for the synchronous or asynchronous
1266 * queue is set.
1267 *
1268 * NOTE The hardware driver calls this function also if no packets are queued.
1269 * The routine must be able to handle this case.
1270 * Args
1271 * smc - A pointer to the SMT context struct.
1272 * Out
1273 * Nothing.
1274 *
1275 ************************/
1276 void llc_restart_tx(struct s_smc *smc)
1277 {
1278 skfddi_priv *bp = &smc->os;
1279
1280 pr_debug("[llc_restart_tx]\n");
1281
1282 // Try to send queued packets
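// send_queued_packets() takes DriverLock itself and llc_restart_tx() is
// called from the ISR path with DriverLock already held, so the lock is
// dropped around the call and re-acquired afterwards.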
1283 spin_unlock(&bp->DriverLock);
1284 send_queued_packets(smc);
1285 spin_lock(&bp->DriverLock);
1286 netif_start_queue(bp->dev);// system may send again if it was blocked
1287
1288 } // llc_restart_tx
1289
1290
1291 /************************
1292 *
1293 * mac_drv_get_space
1294 *
1295 * The hardware module calls this function to allocate the memory
1296 * for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
1297 * Args
1298 * smc - A pointer to the SMT context struct.
1299 *
1300 * size - Size of memory in bytes to allocate.
1301 * Out
1302 * != 0 A pointer to the virtual address of the allocated memory.
1303 * == 0 Allocation error.
1304 *
1305 ************************/
1306 void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1307 {
1308 void *virt;
1309
1310 pr_debug("mac_drv_get_space (%d bytes), ", size);
1311 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1312
1313 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1314 printk("Unexpected SMT memory size requested: %d\n", size);
1315 return NULL;
1316 }
1317 smc->os.SharedMemHeap += size; // Move heap pointer.
1318
1319 pr_debug("mac_drv_get_space end\n");
1320 pr_debug("virt addr: %lx\n", (ulong) virt);
1321 pr_debug("bus addr: %lx\n", (ulong)
1322 (smc->os.SharedMemDMA +
1323 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1324 return virt;
1325 } // mac_drv_get_space
1326
1327
1328 /************************
1329 *
1330 * mac_drv_get_desc_mem
1331 *
1332 * This function is called by the hardware dependent module.
1333 * It allocates the memory for the RxD and TxD descriptors.
1334 *
1335 * This memory must be non-cached, non-movable and non-swappable.
1336 * This memory should start at a physical page boundary.
1337 * Args
1338 * smc - A pointer to the SMT context struct.
1339 *
1340 * size - Size of memory in bytes to allocate.
1341 * Out
1342 * != 0 A pointer to the virtual address of the allocated memory.
1343 * == 0 Allocation error.
1344 *
1345 ************************/
1346 void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1347 {
1348
1349 char *virt;
1350
1351 pr_debug("mac_drv_get_desc_mem\n");
1352
1353 // Descriptor memory must be aligned on 16-byte boundary.
1354
1355 virt = mac_drv_get_space(smc, size);
1356
1357 size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1358 size = size % 16;
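// The gap computed above is (16 - addr % 16) % 16 bytes: just enough to
// push the next allocation onto a 16-byte boundary, and zero when 'virt'
// is already aligned.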
1359
1360 pr_debug("Allocate %u bytes alignment gap ", size);
1361 pr_debug("for descriptor memory.\n");
1362
1363 if (!mac_drv_get_space(smc, size)) {
1364 printk("fddi: Unable to align descriptor memory.\n");
1365 return NULL;
1366 }
1367 return virt + size;
1368 } // mac_drv_get_desc_mem
1369
1370
1371 /************************
1372 *
1373 * mac_drv_virt2phys
1374 *
1375 * Get the physical address of a given virtual address.
1376 * Args
1377 * smc - A pointer to the SMT context struct.
1378 *
1379 * virt - A (virtual) pointer into our 'shared' memory area.
1380 * Out
1381 * Physical address of the given virtual address.
1382 *
1383 ************************/
1384 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1385 {
1386 return smc->os.SharedMemDMA +
1387 ((char *) virt - (char *)smc->os.SharedMemAddr);
1388 } // mac_drv_virt2phys
1389
1390
1391 /************************
1392 *
1393 * dma_master
1394 *
1395 * The HWM calls this function, when the driver leads through a DMA
1396 * transfer. If the OS-specific module must prepare the system hardware
1397 * for the DMA transfer, it should do it in this function.
1398 *
1399 * The hardware module calls this dma_master if it wants to send an SMT
1400 * frame. This means that the virt address passed in here is part of
1401 * the 'shared' memory area.
1402 * Args
1403 * smc - A pointer to the SMT context struct.
1404 *
1405 * virt - The virtual address of the data.
1406 *
1407 * len - The length in bytes of the data.
1408 *
1409 * flag - Indicates the transmit direction and the buffer type:
1410 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1411 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1412 * SMT_BUF (0x80) SMT buffer
1413 *
1414 * >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
1415 * Out
1416 * Returns the physical address for the DMA transfer.
1417 *
1418 ************************/
1419 u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1420 {
1421 return smc->os.SharedMemDMA +
1422 ((char *) virt - (char *)smc->os.SharedMemAddr);
1423 } // dma_master
1424
1425
1426 /************************
1427 *
1428 * dma_complete
1429 *
1430 * The hardware module calls this routine when it has completed a DMA
1431 * transfer. If the operating system dependent module has set up the DMA
1432 * channel via dma_master() (e.g. Windows NT or AIX) it should clean up
1433 * the DMA channel.
1434 * Args
1435 * smc - A pointer to the SMT context struct.
1436 *
1437 * descr - A pointer to a TxD or RxD, respectively.
1438 *
1439 * flag - Indicates the DMA transfer direction / SMT buffer:
1440 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1441 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1442 * SMT_BUF (0x80) SMT buffer (managed by HWM)
1443 * Out
1444 * Nothing.
1445 *
1446 ************************/
1447 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
1448 {
1449 /* For TX buffers, there are two cases. If it is an SMT transmit
1450 * buffer, there is nothing to do since we use consistent memory
1451 * for the 'shared' memory area. The other case is for normal
1452 * transmit packets given to us by the networking stack, and in
1453 * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
1454 * below.
1455 *
1456 * For RX buffers, we have to unmap dynamic PCI DMA mappings here
1457 * because the hardware module is about to potentially look at
1458 * the contents of the buffer. If we did not call the PCI DMA
1459 * unmap first, the hardware module could read inconsistent data.
1460 */
1461 if (flag & DMA_WR) {
1462 skfddi_priv *bp = &smc->os;
1463 volatile struct s_smt_fp_rxd *r = &descr->r;
1464
1465 /* If SKB is NULL, we used the local buffer. */
1466 if (r->rxd_os.skb && r->rxd_os.dma_addr) {
1467 int MaxFrameSize = bp->MaxFrameSize;
1468
1469 pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
1470 MaxFrameSize, PCI_DMA_FROMDEVICE);
1471 r->rxd_os.dma_addr = 0;
1472 }
1473 }
1474 } // dma_complete
1475
1476
1477 /************************
1478 *
1479 * mac_drv_tx_complete
1480 *
1481 * Transmit of a packet is complete. Release the tx staging buffer.
1482 *
1483 * Args
1484 * smc - A pointer to the SMT context struct.
1485 *
1486 * txd - A pointer to the last TxD which is used by the frame.
1487 * Out
1488 * Returns nothing.
1489 *
1490 ************************/
1491 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1492 {
1493 struct sk_buff *skb;
1494
1495 pr_debug("entering mac_drv_tx_complete\n");
1496 // Check if this TxD points to a skb
1497
1498 if (!(skb = txd->txd_os.skb)) {
1499 pr_debug("TXD with no skb assigned.\n");
1500 return;
1501 }
1502 txd->txd_os.skb = NULL;
1503
1504 // release the DMA mapping
1505 pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1506 skb->len, PCI_DMA_TODEVICE);
1507 txd->txd_os.dma_addr = 0;
1508
1509 smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
1510 smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
1511
1512 // free the skb
1513 dev_kfree_skb_irq(skb);
1514
1515 pr_debug("leaving mac_drv_tx_complete\n");
1516 } // mac_drv_tx_complete
1517
1518
1519 /************************
1520 *
1521 * dump packets to logfile
1522 *
1523 ************************/
1524 #ifdef DUMPPACKETS
1525 void dump_data(unsigned char *Data, int length)
1526 {
1527 int i, j;
1528 unsigned char s[255], sh[10];
1529 if (length > 64) {
1530 length = 64;
1531 }
1532 printk(KERN_INFO "---Packet start---\n");
1533 for (i = 0, j = 0; i < length / 8; i++, j += 8)
1534 printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
1535 Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
1536 Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
1537 strcpy(s, "");
1538 for (i = 0; i < length % 8; i++) {
1539 sprintf(sh, "%02x ", Data[j + i]);
1540 strcat(s, sh);
1541 }
1542 printk(KERN_INFO "%s\n", s);
1543 printk(KERN_INFO "------------------\n");
1544 } // dump_data
1545 #else
1546 #define dump_data(data,len)
1547 #endif // DUMPPACKETS
1548
1549 /************************
1550 *
1551 * mac_drv_rx_complete
1552 *
1553 * The hardware module calls this function if an LLC frame is received
1554 * in a receive buffer. Also the SMT, NSA, and directed beacon frames
1555 * from the network will be passed to the LLC layer by this function
1556 * if passing is enabled.
1557 *
1558 * mac_drv_rx_complete forwards the frame to the LLC layer if it should
1559 * be received. It also fills the RxD ring with new receive buffers if
1560 * some can be queued.
1561 * Args
1562 * smc - A pointer to the SMT context struct.
1563 *
1564 * rxd - A pointer to the first RxD which is used by the receive frame.
1565 *
1566 * frag_count - Count of RxDs used by the received frame.
1567 *
1568 * len - Frame length.
1569 * Out
1570 * Nothing.
1571 *
1572 ************************/
1573 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1574 int frag_count, int len)
1575 {
1576 skfddi_priv *bp = &smc->os;
1577 struct sk_buff *skb;
1578 unsigned char *virt, *cp;
1579 unsigned short ri;
1580 u_int RifLength;
1581
1582 pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
1583 if (frag_count != 1) { // This is not allowed to happen.
1584
1585 printk("fddi: Multi-fragment receive!\n");
1586 goto RequeueRxd; // Re-use the given RXD(s).
1587
1588 }
1589 skb = rxd->rxd_os.skb;
1590 if (!skb) {
1591 pr_debug("No skb in rxd\n");
1592 smc->os.MacStat.gen.rx_errors++;
1593 goto RequeueRxd;
1594 }
1595 virt = skb->data;
1596
1597 // The DMA mapping was released in dma_complete above.
1598
1599 dump_data(skb->data, len);
1600
1601 /*
1602 * FDDI Frame format:
1603 * +-------+-------+-------+------------+--------+------------+
1604 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
1605 * +-------+-------+-------+------------+--------+------------+
1606 *
1607 * FC = Frame Control
1608 * DA = Destination Address
1609 * SA = Source Address
1610 * RIF = Routing Information Field
1611 * LLC = Logical Link Control
1612 */
1613
1614 // Remove Routing Information Field (RIF), if present.
1615
1616 if ((virt[1 + 6] & FDDI_RII) == 0)
1617 RifLength = 0;
1618 else {
1619 int n;
1620 // goos: RIF removal has still to be tested
1621 pr_debug("RIF found\n");
1622 // Get RIF length from Routing Control (RC) field.
1623 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
1624
1625 ri = ntohs(*((__be16 *) cp));
1626 RifLength = ri & FDDI_RCF_LEN_MASK;
1627 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
1628 printk("fddi: Invalid RIF.\n");
1629 goto RequeueRxd; // Discard the frame.
1630
1631 }
1632 virt[1 + 6] &= ~FDDI_RII; // Clear RII bit.
1633 // Regions overlap: copy the MAC header backwards, byte by byte, to just in front of the LLC data.
1634
1635 virt = cp + RifLength;
1636 for (n = FDDI_MAC_HDR_LEN; n; n--)
1637 *--virt = *--cp;
1638 // adjust skb->data pointer
1639 skb_pull(skb, RifLength);
1640 len -= RifLength;
1641 RifLength = 0;
1642 }
1643
1644 // Count statistics.
1645 smc->os.MacStat.gen.rx_packets++; // Count indicated receive
1646 // packets.
1647 smc->os.MacStat.gen.rx_bytes+=len; // Count bytes.
1648
1649 // virt points to header again
1650 if (virt[1] & 0x01) { // Check group (multicast) bit.
1651
1652 smc->os.MacStat.gen.multicast++;
1653 }
1654
1655 // deliver frame to system
1656 rxd->rxd_os.skb = NULL;
1657 skb_trim(skb, len);
1658 skb->protocol = fddi_type_trans(skb, bp->dev);
1659
1660 netif_rx(skb);
1661
1662 HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
1663 return;
1664
1665 RequeueRxd:
1666 pr_debug("Rx: re-queue RXD.\n");
1667 mac_drv_requeue_rxd(smc, rxd, frag_count);
1668 smc->os.MacStat.gen.rx_errors++; // Count receive packets
1669 // not indicated.
1670
1671 } // mac_drv_rx_complete
1672
1673
1674 /************************
1675 *
1676 * mac_drv_requeue_rxd
1677 *
1678 * The hardware module calls this function to request the OS-specific
1679 * module to queue the receive buffer(s) represented by the pointer
1680 * to the RxD and the frag_count into the receive queue again. This
1681 * buffer was filled with an invalid frame or an SMT frame.
1682 * Args
1683 * smc - A pointer to the SMT context struct.
1684 *
1685 * rxd - A pointer to the first RxD which is used by the receive frame.
1686 *
1687 * frag_count - Count of RxDs used by the received frame.
1688 * Out
1689 * Nothing.
1690 *
1691 ************************/
1692 void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1693 int frag_count)
1694 {
1695 volatile struct s_smt_fp_rxd *next_rxd;
1696 volatile struct s_smt_fp_rxd *src_rxd;
1697 struct sk_buff *skb;
1698 int MaxFrameSize;
1699 unsigned char *v_addr;
1700 dma_addr_t b_addr;
1701
1702 if (frag_count != 1) // This is not allowed to happen.
1703
1704 printk("fddi: Multi-fragment requeue!\n");
1705
1706 MaxFrameSize = smc->os.MaxFrameSize;
1707 src_rxd = rxd;
1708 for (; frag_count > 0; frag_count--) {
1709 next_rxd = src_rxd->rxd_next;
1710 rxd = HWM_GET_CURR_RXD(smc);
1711
1712 skb = src_rxd->rxd_os.skb;
1713 if (skb == NULL) { // this should not happen
1714
1715 pr_debug("Requeue with no skb in rxd!\n");
1716 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1717 if (skb) {
1718 // we got a skb
1719 rxd->rxd_os.skb = skb;
1720 skb_reserve(skb, 3);
1721 skb_put(skb, MaxFrameSize);
1722 v_addr = skb->data;
1723 b_addr = pci_map_single(&smc->os.pdev,
1724 v_addr,
1725 MaxFrameSize,
1726 PCI_DMA_FROMDEVICE);
1727 rxd->rxd_os.dma_addr = b_addr;
1728 } else {
1729 // no skb available, use local buffer
1730 pr_debug("Queueing invalid buffer!\n");
1731 rxd->rxd_os.skb = NULL;
1732 v_addr = smc->os.LocalRxBuffer;
1733 b_addr = smc->os.LocalRxBufferDMA;
1734 }
1735 } else {
1736 // we use skb from old rxd
1737 rxd->rxd_os.skb = skb;
1738 v_addr = skb->data;
1739 b_addr = pci_map_single(&smc->os.pdev,
1740 v_addr,
1741 MaxFrameSize,
1742 PCI_DMA_FROMDEVICE);
1743 rxd->rxd_os.dma_addr = b_addr;
1744 }
1745 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1746 FIRST_FRAG | LAST_FRAG);
1747
1748 src_rxd = next_rxd;
1749 }
1750 } // mac_drv_requeue_rxd
1751
1752
1753 /************************
1754 *
1755 * mac_drv_fill_rxd
1756 *
1757 * The hardware module calls this function at initialization time
1758 * to fill the RxD ring with receive buffers. It is also called by
1759 * mac_drv_rx_complete if rx_free is large enough to queue some new
1760 * receive buffers into the RxD ring. mac_drv_fill_rxd queues new
1761 * receive buffers as long as enough RxDs and receive buffers are
1762 * available.
1763 * Args
1764 * smc - A pointer to the SMT context struct.
1765 * Out
1766 * Nothing.
1767 *
1768 ************************/
1769 void mac_drv_fill_rxd(struct s_smc *smc)
1770 {
1771 int MaxFrameSize;
1772 unsigned char *v_addr;
1773 unsigned long b_addr;
1774 struct sk_buff *skb;
1775 volatile struct s_smt_fp_rxd *rxd;
1776
1777 pr_debug("entering mac_drv_fill_rxd\n");
1778
1779 // Walk through the list of free receive buffers, passing receive
1780 // buffers to the HWM as long as RXDs are available.
1781
1782 MaxFrameSize = smc->os.MaxFrameSize;
1783 // Check if there is any RXD left.
1784 while (HWM_GET_RX_FREE(smc) > 0) {
1785 pr_debug(".\n");
1786
1787 rxd = HWM_GET_CURR_RXD(smc);
1788 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1789 if (skb) {
1790 // we got a skb
1791 skb_reserve(skb, 3);
1792 skb_put(skb, MaxFrameSize);
1793 v_addr = skb->data;
1794 b_addr = pci_map_single(&smc->os.pdev,
1795 v_addr,
1796 MaxFrameSize,
1797 PCI_DMA_FROMDEVICE);
1798 rxd->rxd_os.dma_addr = b_addr;
1799 } else {
1800 // no skb available, use local buffer
1801 // System has run out of buffer memory, but we want to
1802 // keep the receiver running in hope of better times.
1803 // Multiple descriptors may point to this local buffer,
1804 // so data in it must be considered invalid.
1805 pr_debug("Queueing invalid buffer!\n");
1806 v_addr = smc->os.LocalRxBuffer;
1807 b_addr = smc->os.LocalRxBufferDMA;
1808 }
1809
1810 rxd->rxd_os.skb = skb;
1811
1812 // Pass receive buffer to HWM.
1813 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1814 FIRST_FRAG | LAST_FRAG);
1815 }
1816 pr_debug("leaving mac_drv_fill_rxd\n");
1817 } // mac_drv_fill_rxd
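
/*
 * Illustrative sketch only, kept compiled out: mac_drv_requeue_rxd() and
 * mac_drv_fill_rxd() above share the same receive-buffer setup pattern.
 * The hypothetical helper below merely restates that pattern in one place;
 * the name skfp_setup_rx_buffer and the factoring itself are assumptions,
 * not part of the actual driver.
 */
#if 0
static void skfp_setup_rx_buffer(struct s_smc *smc,
				 volatile struct s_smt_fp_rxd *rxd,
				 unsigned char **v_addr, dma_addr_t *b_addr)
{
	int MaxFrameSize = smc->os.MaxFrameSize;
	struct sk_buff *skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);

	if (skb) {
		// 3 bytes headroom as in the functions above, presumably so
		// that the data behind the 13-byte FDDI MAC header starts
		// 16-byte aligned.
		skb_reserve(skb, 3);
		skb_put(skb, MaxFrameSize);
		*v_addr = skb->data;
		*b_addr = pci_map_single(&smc->os.pdev, *v_addr,
					 MaxFrameSize, PCI_DMA_FROMDEVICE);
		rxd->rxd_os.dma_addr = *b_addr;
	} else {
		// Out of memory: fall back to the shared local buffer so the
		// receiver keeps running; data landing there is invalid.
		*v_addr = smc->os.LocalRxBuffer;
		*b_addr = smc->os.LocalRxBufferDMA;
	}
	rxd->rxd_os.skb = skb;
}
#endif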
1818
1819
1820 /************************
1821 *
1822 * mac_drv_clear_rxd
1823 *
1824 * The hardware module calls this function to release unused
1825 * receive buffers.
1826 * Args
1827 * smc - A pointer to the SMT context struct.
1828 *
1829 * rxd - A pointer to the first RxD which is used by the receive buffer.
1830 *
1831 * frag_count - Count of RxDs used by the receive buffer.
1832 * Out
1833 * Nothing.
1834 *
1835 ************************/
1836 void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1837 int frag_count)
1838 {
1839
1840 struct sk_buff *skb;
1841
1842 pr_debug("entering mac_drv_clear_rxd\n");
1843
1844 if (frag_count != 1) // This is not allowed to happen.
1845
1846 printk("fddi: Multi-fragment clear!\n");
1847
1848 for (; frag_count > 0; frag_count--) {
1849 skb = rxd->rxd_os.skb;
1850 if (skb != NULL) {
1851 skfddi_priv *bp = &smc->os;
1852 int MaxFrameSize = bp->MaxFrameSize;
1853
1854 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1855 MaxFrameSize, PCI_DMA_FROMDEVICE);
1856
1857 dev_kfree_skb(skb);
1858 rxd->rxd_os.skb = NULL;
1859 }
1860 rxd = rxd->rxd_next; // Next RXD.
1861
1862 }
1863 } // mac_drv_clear_rxd
1864
1865
1866 /************************
1867 *
1868 * mac_drv_rx_init
1869 *
1870 * The hardware module calls this routine when an SMT or NSA frame of the
1871 * local SMT should be delivered to the LLC layer.
1872 *
1873 * It is necessary to have this function, because there is no other way to
1874 * copy the contents of SMT MBufs into receive buffers.
1875 *
1876 * mac_drv_rx_init allocates the required target memory for this frame,
1877 * and receives the frame fragment by fragment by calling mac_drv_rx_frag.
1878 * Args
1879 * smc - A pointer to the SMT context struct.
1880 *
1881 * len - The length (in bytes) of the received frame (FC, DA, SA, Data).
1882 *
1883 * fc - The Frame Control field of the received frame.
1884 *
1885 * look_ahead - A pointer to the lookahead data buffer (may be NULL).
1886 *
1887 * la_len - The length of the lookahead data stored in the lookahead
1888 * buffer (may be zero).
1889 * Out
1890 * Always returns zero (0).
1891 *
1892 ************************/
1893 int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1894 char *look_ahead, int la_len)
1895 {
1896 struct sk_buff *skb;
1897
1898 pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1899
1900 	// "Received" an SMT or NSA frame of the local SMT.
1901
1902 if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
1903 pr_debug("fddi: Discard invalid local SMT frame\n");
1904 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1905 len, la_len, (unsigned long) look_ahead);
1906 return 0;
1907 }
1908 skb = alloc_skb(len + 3, GFP_ATOMIC);
1909 if (!skb) {
1910 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1911 return 0;
1912 }
1913 skb_reserve(skb, 3);
1914 skb_put(skb, len);
1915 skb_copy_to_linear_data(skb, look_ahead, len);
1916
1917 // deliver frame to system
1918 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1919 netif_rx(skb);
1920
1921 return 0;
1922 } // mac_drv_rx_init
1923
1924
1925 /************************
1926 *
1927 * smt_timer_poll
1928 *
1929 * This routine is called periodically by the SMT module to clean up the
1930 * driver.
1931 *
1932 * Return any queued frames back to the upper protocol layers if the ring
1933 * is down.
1934 * Args
1935 * smc - A pointer to the SMT context struct.
1936 * Out
1937 * Nothing.
1938 *
1939 ************************/
1940 void smt_timer_poll(struct s_smc *smc)
1941 {
1942 } // smt_timer_poll
1943
1944
1945 /************************
1946 *
1947 * ring_status_indication
1948 *
1949 * This function indicates a change of the ring state.
1950 * Args
1951 * smc - A pointer to the SMT context struct.
1952 *
1953 * status - The current ring status.
1954 * Out
1955 * Nothing.
1956 *
1957 ************************/
1958 void ring_status_indication(struct s_smc *smc, u_long status)
1959 {
1960 pr_debug("ring_status_indication( ");
1961 if (status & RS_RES15)
1962 pr_debug("RS_RES15 ");
1963 if (status & RS_HARDERROR)
1964 pr_debug("RS_HARDERROR ");
1965 if (status & RS_SOFTERROR)
1966 pr_debug("RS_SOFTERROR ");
1967 if (status & RS_BEACON)
1968 pr_debug("RS_BEACON ");
1969 if (status & RS_PATHTEST)
1970 pr_debug("RS_PATHTEST ");
1971 if (status & RS_SELFTEST)
1972 pr_debug("RS_SELFTEST ");
1973 if (status & RS_RES9)
1974 pr_debug("RS_RES9 ");
1975 if (status & RS_DISCONNECT)
1976 pr_debug("RS_DISCONNECT ");
1977 if (status & RS_RES7)
1978 pr_debug("RS_RES7 ");
1979 if (status & RS_DUPADDR)
1980 pr_debug("RS_DUPADDR ");
1981 if (status & RS_NORINGOP)
1982 pr_debug("RS_NORINGOP ");
1983 if (status & RS_VERSION)
1984 pr_debug("RS_VERSION ");
1985 if (status & RS_STUCKBYPASSS)
1986 pr_debug("RS_STUCKBYPASSS ");
1987 if (status & RS_EVENT)
1988 pr_debug("RS_EVENT ");
1989 if (status & RS_RINGOPCHANGE)
1990 pr_debug("RS_RINGOPCHANGE ");
1991 if (status & RS_RES0)
1992 pr_debug("RS_RES0 ");
1993 	pr_debug(")\n");
1994 } // ring_status_indication
1995
1996
1997 /************************
1998 *
1999 * smt_get_time
2000 *
2001 * Gets the current time from the system.
2002 * Args
2003 * None.
2004 * Out
2005  *	The current time, measured in timer ticks.
2006  *
2007  *	TICKS_PER_SECOND is the number of timer ticks per second. It is
2008  *	defined in "targetos.h" and must match the unit of the value
2009  *	returned by smt_get_time().
2010 *
2011 ************************/
2012 unsigned long smt_get_time(void)
2013 {
2014 return jiffies;
2015 } // smt_get_time
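
/*
 * Note: since smt_get_time() simply returns jiffies, TICKS_PER_SECOND is
 * presumably defined as HZ in "targetos.h"; any other definition would
 * scale all SMT timers by a constant factor.
 */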
2016
2017
2018 /************************
2019 *
2020 * smt_stat_counter
2021 *
2022 * Status counter update (ring_op, fifo full).
2023 * Args
2024 * smc - A pointer to the SMT context struct.
2025 *
2026 * stat - = 0: A ring operational change occurred.
2027 * = 1: The FORMAC FIFO buffer is full / FIFO overflow.
2028 * Out
2029 * Nothing.
2030 *
2031 ************************/
2032 void smt_stat_counter(struct s_smc *smc, int stat)
2033 {
2034 // BOOLEAN RingIsUp ;
2035
2036 pr_debug("smt_stat_counter\n");
2037 switch (stat) {
2038 case 0:
2039 pr_debug("Ring operational change.\n");
2040 break;
2041 case 1:
2042 pr_debug("Receive fifo overflow.\n");
2043 smc->os.MacStat.gen.rx_errors++;
2044 break;
2045 default:
2046 pr_debug("Unknown status (%d).\n", stat);
2047 break;
2048 }
2049 } // smt_stat_counter
2050
2051
2052 /************************
2053 *
2054 * cfm_state_change
2055 *
2056 * Sets CFM state in custom statistics.
2057 * Args
2058 * smc - A pointer to the SMT context struct.
2059 *
2060 * c_state - Possible values are:
2061 *
2062  *		SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
2063  *		SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
2064 * Out
2065 * Nothing.
2066 *
2067 ************************/
2068 void cfm_state_change(struct s_smc *smc, int c_state)
2069 {
2070 #ifdef DRIVERDEBUG
2071 char *s;
2072
2073 switch (c_state) {
2074 case SC0_ISOLATED:
2075 s = "SC0_ISOLATED";
2076 break;
2077 case SC1_WRAP_A:
2078 s = "SC1_WRAP_A";
2079 break;
2080 case SC2_WRAP_B:
2081 s = "SC2_WRAP_B";
2082 break;
2083 case SC4_THRU_A:
2084 s = "SC4_THRU_A";
2085 break;
2086 case SC5_THRU_B:
2087 s = "SC5_THRU_B";
2088 break;
2089 case SC7_WRAP_S:
2090 s = "SC7_WRAP_S";
2091 break;
2092 case SC9_C_WRAP_A:
2093 s = "SC9_C_WRAP_A";
2094 break;
2095 case SC10_C_WRAP_B:
2096 s = "SC10_C_WRAP_B";
2097 break;
2098 case SC11_C_WRAP_S:
2099 s = "SC11_C_WRAP_S";
2100 break;
2101 default:
2102 pr_debug("cfm_state_change: unknown %d\n", c_state);
2103 return;
2104 }
2105 pr_debug("cfm_state_change: %s\n", s);
2106 #endif // DRIVERDEBUG
2107 } // cfm_state_change
2108
2109
2110 /************************
2111 *
2112 * ecm_state_change
2113 *
2114 * Sets ECM state in custom statistics.
2115 * Args
2116 * smc - A pointer to the SMT context struct.
2117 *
2118 * e_state - Possible values are:
2119 *
2120  *		EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
2121  *		EC5_INSERT, EC6_CHECK, EC7_DEINSERT
2122 * Out
2123 * Nothing.
2124 *
2125 ************************/
2126 void ecm_state_change(struct s_smc *smc, int e_state)
2127 {
2128 #ifdef DRIVERDEBUG
2129 char *s;
2130
2131 switch (e_state) {
2132 case EC0_OUT:
2133 s = "EC0_OUT";
2134 break;
2135 case EC1_IN:
2136 s = "EC1_IN";
2137 break;
2138 case EC2_TRACE:
2139 s = "EC2_TRACE";
2140 break;
2141 case EC3_LEAVE:
2142 s = "EC3_LEAVE";
2143 break;
2144 case EC4_PATH_TEST:
2145 s = "EC4_PATH_TEST";
2146 break;
2147 case EC5_INSERT:
2148 s = "EC5_INSERT";
2149 break;
2150 case EC6_CHECK:
2151 s = "EC6_CHECK";
2152 break;
2153 case EC7_DEINSERT:
2154 s = "EC7_DEINSERT";
2155 break;
2156 default:
2157 s = "unknown";
2158 break;
2159 }
2160 pr_debug("ecm_state_change: %s\n", s);
2161 #endif //DRIVERDEBUG
2162 } // ecm_state_change
2163
2164
2165 /************************
2166 *
2167 * rmt_state_change
2168 *
2169 * Sets RMT state in custom statistics.
2170 * Args
2171 * smc - A pointer to the SMT context struct.
2172 *
2173 * r_state - Possible values are:
2174 *
2175 * RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
2176 * RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
2177 * Out
2178 * Nothing.
2179 *
2180 ************************/
2181 void rmt_state_change(struct s_smc *smc, int r_state)
2182 {
2183 #ifdef DRIVERDEBUG
2184 char *s;
2185
2186 switch (r_state) {
2187 case RM0_ISOLATED:
2188 s = "RM0_ISOLATED";
2189 break;
2190 case RM1_NON_OP:
2191 s = "RM1_NON_OP - not operational";
2192 break;
2193 case RM2_RING_OP:
2194 s = "RM2_RING_OP - ring operational";
2195 break;
2196 case RM3_DETECT:
2197 s = "RM3_DETECT - detect dupl addresses";
2198 break;
2199 case RM4_NON_OP_DUP:
2200 s = "RM4_NON_OP_DUP - dupl. addr detected";
2201 break;
2202 case RM5_RING_OP_DUP:
2203 s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
2204 break;
2205 case RM6_DIRECTED:
2206 s = "RM6_DIRECTED - sending directed beacons";
2207 break;
2208 case RM7_TRACE:
2209 s = "RM7_TRACE - trace initiated";
2210 break;
2211 default:
2212 s = "unknown";
2213 break;
2214 }
2215 pr_debug("[rmt_state_change: %s]\n", s);
2216 #endif // DRIVERDEBUG
2217 } // rmt_state_change
2218
2219
2220 /************************
2221 *
2222 * drv_reset_indication
2223 *
2224 * This function is called by the SMT when it has detected a severe
2225 * hardware problem. The driver should perform a reset on the adapter
2226 * as soon as possible, but not from within this function.
2227 * Args
2228 * smc - A pointer to the SMT context struct.
2229 * Out
2230 * Nothing.
2231 *
2232 ************************/
2233 void drv_reset_indication(struct s_smc *smc)
2234 {
2235 pr_debug("entering drv_reset_indication\n");
2236
2237 smc->os.ResetRequested = TRUE; // Set flag.
2238
2239 } // drv_reset_indication
2240
2241 static struct pci_driver skfddi_pci_driver = {
2242 .name = "skfddi",
2243 .id_table = skfddi_pci_tbl,
2244 .probe = skfp_init_one,
2245 .remove = skfp_remove_one,
2246 };
2247
2248 module_pci_driver(skfddi_pci_driver);