1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * File Name:
4  *   skfddi.c
5  *
6  * Copyright Information:
7  *   Copyright SysKonnect 1998,1999.
8  *
9  * The information in this file is provided "AS IS" without warranty.
10  *
11  * Abstract:
12  *   A Linux device driver supporting the SysKonnect FDDI PCI controller
13  *   family.
14  *
15  * Maintainers:
16  *   CG    Christoph Goos (cgoos@syskonnect.de)
17  *
18  * Contributors:
19  *   DM    David S. Miller
20  *
21  * Address all questions to:
22  *   linux@syskonnect.de
23  *
24  * The technical manual for the adapters is available from SysKonnect's
25  * web pages: www.syskonnect.com
26  * Go to "Support" and search the Knowledge Base for "manual".
27  *
28  * Driver Architecture:
29  *   The driver architecture is based on the DEC FDDI driver by
30  *   Lawrence V. Stefani and several ethernet drivers.
31  *   I also used an existing Windows NT miniport driver.
32  *   All hardware dependent functions are handled by the SysKonnect
33  *   Hardware Module.
34  *   The only header files that are directly related to this source
35  *   are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
36  *   The others belong to the SysKonnect FDDI Hardware Module and
37  *   should better not be changed.
38  *
39  * Modification History:
40  *              Date            Name    Description
41  *              02-Mar-98       CG	Created.
42  *
43  *		10-Mar-99	CG	Support for 2.2.x added.
44  *		25-Mar-99	CG	Corrected IRQ routing for SMP (APIC)
45  *		26-Oct-99	CG	Fixed compilation error on 2.2.13
46  *		12-Nov-99	CG	Source code release
47  *		22-Nov-99	CG	Included in kernel source.
48  *		07-May-00	DM	64 bit fixes, new dma interface
49  *		31-Jul-03	DB	Audit copy_*_user in skfp_ioctl
50  *					  Daniele Bellucci <bellucda@tiscali.it>
51  *		03-Dec-03	SH	Convert to PCI device model
52  *
53  * Compilation options (-Dxxx):
54  *              DRIVERDEBUG     print lots of messages to log file
55  *              DUMPPACKETS     print received/transmitted packets to logfile
56  *
57  * Tested cpu architectures:
58  *	- i386
59  *	- sparc64
60  */
61 
62 /* Version information string - should be updated prior to */
63 /* each new release!!! */
64 #define VERSION		"2.07"
65 
66 static const char * const boot_msg =
67 	"SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
68 	"  SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
69 
70 /* Include files */
71 
72 #include <linux/capability.h>
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/interrupt.h>
78 #include <linux/pci.h>
79 #include <linux/netdevice.h>
80 #include <linux/fddidevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/bitops.h>
83 #include <linux/gfp.h>
84 
85 #include <asm/byteorder.h>
86 #include <asm/io.h>
87 #include <linux/uaccess.h>
88 
89 #include	"h/types.h"
90 #undef ADDR			// undo Linux definition
91 #include	"h/skfbi.h"
92 #include	"h/fddi.h"
93 #include	"h/smc.h"
94 #include	"h/smtstate.h"
95 
96 
97 // Define module-wide (static) routines
98 static int skfp_driver_init(struct net_device *dev);
99 static int skfp_open(struct net_device *dev);
100 static int skfp_close(struct net_device *dev);
101 static irqreturn_t skfp_interrupt(int irq, void *dev_id);
102 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
103 static void skfp_ctl_set_multicast_list(struct net_device *dev);
104 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
105 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
106 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
107 static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
108 				       struct net_device *dev);
109 static void send_queued_packets(struct s_smc *smc);
110 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
111 static void ResetAdapter(struct s_smc *smc);
112 
113 
114 // Functions needed by the hardware module
115 void *mac_drv_get_space(struct s_smc *smc, u_int size);
116 void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
117 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
118 unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
119 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
120 		  int flag);
121 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
122 void llc_restart_tx(struct s_smc *smc);
123 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
124 			 int frag_count, int len);
125 void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
126 			 int frag_count);
127 void mac_drv_fill_rxd(struct s_smc *smc);
128 void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
129 		       int frag_count);
130 int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
131 		    int la_len);
132 void dump_data(unsigned char *Data, int length);
133 
134 // External functions from the hardware module
135 extern u_int mac_drv_check_space(void);
136 extern int mac_drv_init(struct s_smc *smc);
137 extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
138 			int len, int frame_status);
139 extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
140 		       int frame_len, int frame_status);
141 extern void fddi_isr(struct s_smc *smc);
142 extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
143 			int len, int frame_status);
144 extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
145 extern void mac_drv_clear_rx_queue(struct s_smc *smc);
146 extern void enable_tx_irq(struct s_smc *smc, u_short queue);
147 
148 static const struct pci_device_id skfddi_pci_tbl[] = {
149 	{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
150 	{ }			/* Terminating entry */
151 };
152 MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
153 MODULE_LICENSE("GPL");
154 MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
155 
156 // Define module-wide (static) variables
157 
158 static int num_boards;	/* total number of adapters configured */
159 
160 static const struct net_device_ops skfp_netdev_ops = {
161 	.ndo_open		= skfp_open,
162 	.ndo_stop		= skfp_close,
163 	.ndo_start_xmit		= skfp_send_pkt,
164 	.ndo_get_stats		= skfp_ctl_get_stats,
165 	.ndo_set_rx_mode	= skfp_ctl_set_multicast_list,
166 	.ndo_set_mac_address	= skfp_ctl_set_mac_address,
167 	.ndo_do_ioctl		= skfp_ioctl,
168 };
169 
170 /*
171  * =================
172  * = skfp_init_one =
173  * =================
174  *
175  * Overview:
176  *   Probes for supported FDDI PCI controllers
177  *
178  * Returns:
179  *   Condition code
180  *
181  * Arguments:
182  *   pdev - pointer to PCI device information
183  *
184  * Functional Description:
185  *   This is now called by PCI driver registration process
186  *   for each board found.
187  *
188  * Return Codes:
189  *   0           - This device (fddi0, fddi1, etc) configured successfully
190  *   -ENODEV - No devices present, or no SysKonnect FDDI PCI device
191  *                         present for this device name
192  *
193  *
194  * Side Effects:
195  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
196  *   initialized and the board resources are read and stored in
197  *   the device structure.
198  */
199 static int skfp_init_one(struct pci_dev *pdev,
200 				const struct pci_device_id *ent)
201 {
202 	struct net_device *dev;
203 	struct s_smc *smc;	/* board pointer */
204 	void __iomem *mem;
205 	int err;
206 
207 	pr_debug("entering skfp_init_one\n");
208 
209 	if (num_boards == 0)
210 		printk("%s\n", boot_msg);
211 
212 	err = pci_enable_device(pdev);
213 	if (err)
214 		return err;
215 
216 	err = pci_request_regions(pdev, "skfddi");
217 	if (err)
218 		goto err_out1;
219 
220 	pci_set_master(pdev);
221 
222 #ifdef MEM_MAPPED_IO
223 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
224 		printk(KERN_ERR "skfp: region is not an MMIO resource\n");
225 		err = -EIO;
226 		goto err_out2;
227 	}
228 
229 	mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
230 #else
231 	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
232 		printk(KERN_ERR "skfp: region is not a PIO resource\n");
233 		err = -EIO;
234 		goto err_out2;
235 	}
236 
237 	mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
238 #endif
239 	if (!mem) {
240 		printk(KERN_ERR "skfp:  Unable to map register, "
241 				"FDDI adapter will be disabled.\n");
242 		err = -EIO;
243 		goto err_out2;
244 	}
245 
246 	dev = alloc_fddidev(sizeof(struct s_smc));
247 	if (!dev) {
248 		printk(KERN_ERR "skfp: Unable to allocate fddi device, "
249 				"FDDI adapter will be disabled.\n");
250 		err = -ENOMEM;
251 		goto err_out3;
252 	}
253 
254 	dev->irq = pdev->irq;
255 	dev->netdev_ops = &skfp_netdev_ops;
256 
257 	SET_NETDEV_DEV(dev, &pdev->dev);
258 
259 	/* Initialize board structure with bus-specific info */
260 	smc = netdev_priv(dev);
261 	smc->os.dev = dev;
262 	smc->os.bus_type = SK_BUS_TYPE_PCI;
263 	smc->os.pdev = *pdev;
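	/* QueueSkb counts the free slots in the driver's private transmit
	 * queue; it starts at MAX_TX_QUEUE_LEN and reaches zero when the
	 * queue is full (see skfp_send_pkt). */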
264 	smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
265 	smc->os.MaxFrameSize = MAX_FRAME_SIZE;
266 	smc->os.dev = dev;
267 	smc->hw.slot = -1;
268 	smc->hw.iop = mem;
269 	smc->os.ResetRequested = FALSE;
270 	skb_queue_head_init(&smc->os.SendSkbQueue);
271 
272 	dev->base_addr = (unsigned long)mem;
273 
274 	err = skfp_driver_init(dev);
275 	if (err)
276 		goto err_out4;
277 
278 	err = register_netdev(dev);
279 	if (err)
280 		goto err_out5;
281 
282 	++num_boards;
283 	pci_set_drvdata(pdev, dev);
284 
285 	if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
286 	    (pdev->subsystem_device & 0xff00) == 0x5800)
287 		printk("%s: SysKonnect FDDI PCI adapter"
288 		       " found (SK-%04X)\n", dev->name,
289 		       pdev->subsystem_device);
290 	else
291 		printk("%s: FDDI PCI adapter found\n", dev->name);
292 
293 	return 0;
294 err_out5:
295 	if (smc->os.SharedMemAddr)
296 		dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
297 				  smc->os.SharedMemAddr,
298 				  smc->os.SharedMemDMA);
299 	dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
300 			  smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
301 err_out4:
302 	free_netdev(dev);
303 err_out3:
304 #ifdef MEM_MAPPED_IO
305 	iounmap(mem);
306 #else
307 	ioport_unmap(mem);
308 #endif
309 err_out2:
310 	pci_release_regions(pdev);
311 err_out1:
312 	pci_disable_device(pdev);
313 	return err;
314 }
315 
316 /*
317  * Called for each adapter board from pci_unregister_driver
318  */
319 static void skfp_remove_one(struct pci_dev *pdev)
320 {
321 	struct net_device *p = pci_get_drvdata(pdev);
322 	struct s_smc *lp = netdev_priv(p);
323 
324 	unregister_netdev(p);
325 
326 	if (lp->os.SharedMemAddr) {
327 		dma_free_coherent(&pdev->dev,
328 				  lp->os.SharedMemSize,
329 				  lp->os.SharedMemAddr,
330 				  lp->os.SharedMemDMA);
331 		lp->os.SharedMemAddr = NULL;
332 	}
333 	if (lp->os.LocalRxBuffer) {
334 		dma_free_coherent(&pdev->dev,
335 				  MAX_FRAME_SIZE,
336 				  lp->os.LocalRxBuffer,
337 				  lp->os.LocalRxBufferDMA);
338 		lp->os.LocalRxBuffer = NULL;
339 	}
340 #ifdef MEM_MAPPED_IO
341 	iounmap(lp->hw.iop);
342 #else
343 	ioport_unmap(lp->hw.iop);
344 #endif
345 	pci_release_regions(pdev);
346 	free_netdev(p);
347 
348 	pci_disable_device(pdev);
349 }
350 
351 /*
352  * ====================
353  * = skfp_driver_init =
354  * ====================
355  *
356  * Overview:
357  *   Initializes remaining adapter board structure information
358  *   and makes sure adapter is in a safe state prior to skfp_open().
359  *
360  * Returns:
361  *   Condition code
362  *
363  * Arguments:
364  *   dev - pointer to device information
365  *
366  * Functional Description:
367  *   This function allocates additional resources such as the host memory
368  *   blocks needed by the adapter.
369  *   The adapter is also reset. The OS must call skfp_open() to open
370  *   the adapter and bring it on-line.
371  *
372  * Return Codes:
373  *    0 - initialization succeeded
374  *   -1 - initialization failed
375  */
376 static int skfp_driver_init(struct net_device *dev)
377 {
378 	struct s_smc *smc = netdev_priv(dev);
379 	skfddi_priv *bp = &smc->os;
380 	int err = -EIO;
381 
382 	pr_debug("entering skfp_driver_init\n");
383 
384 	// set the io address in private structures
385 	bp->base_addr = dev->base_addr;
386 
387 	// Get the interrupt level from the PCI Configuration Table
388 	smc->hw.irq = dev->irq;
389 
390 	spin_lock_init(&bp->DriverLock);
391 
392 	// Allocate invalid frame
393 	bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
394 					       &bp->LocalRxBufferDMA,
395 					       GFP_ATOMIC);
396 	if (!bp->LocalRxBuffer) {
397 		printk("could not allocate mem for LocalRxBuffer: %d bytes\n",
398 		       MAX_FRAME_SIZE);
399 		goto fail;
400 	}
401 
402 	// Determine the required size of the 'shared' memory area.
403 	bp->SharedMemSize = mac_drv_check_space();
404 	pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
405 	if (bp->SharedMemSize > 0) {
406 		bp->SharedMemSize += 16;	// for descriptor alignment
407 
408 		bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
409 						       bp->SharedMemSize,
410 						       &bp->SharedMemDMA,
411 						       GFP_ATOMIC);
412 		if (!bp->SharedMemAddr) {
413 			printk("could not allocate mem for hardware module: %ld bytes\n",
414 			       bp->SharedMemSize);
416 			goto fail;
417 		}
418 
419 	} else {
420 		bp->SharedMemAddr = NULL;
421 	}
422 
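	// Heap offset used by mac_drv_get_space() to carve allocations
	// out of the shared memory area.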
423 	bp->SharedMemHeap = 0;
424 
425 	card_stop(smc);		// Reset adapter.
426 
427 	pr_debug("mac_drv_init()..\n");
428 	if (mac_drv_init(smc) != 0) {
429 		pr_debug("mac_drv_init() failed\n");
430 		goto fail;
431 	}
432 	read_address(smc, NULL);
433 	pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
434 	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
435 
436 	smt_reset_defaults(smc, 0);
437 
438 	return 0;
439 
440 fail:
441 	if (bp->SharedMemAddr) {
442 		dma_free_coherent(&bp->pdev.dev,
443 				  bp->SharedMemSize,
444 				  bp->SharedMemAddr,
445 				  bp->SharedMemDMA);
446 		bp->SharedMemAddr = NULL;
447 	}
448 	if (bp->LocalRxBuffer) {
449 		dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
450 				  bp->LocalRxBuffer, bp->LocalRxBufferDMA);
451 		bp->LocalRxBuffer = NULL;
452 	}
453 	return err;
454 }				// skfp_driver_init
455 
456 
457 /*
458  * =============
459  * = skfp_open =
460  * =============
461  *
462  * Overview:
463  *   Opens the adapter
464  *
465  * Returns:
466  *   Condition code
467  *
468  * Arguments:
469  *   dev - pointer to device information
470  *
471  * Functional Description:
472  *   This function brings the adapter to an operational state.
473  *
474  * Return Codes:
475  *   0           - Adapter was successfully opened
476  *   -EAGAIN - Could not register IRQ
477  */
478 static int skfp_open(struct net_device *dev)
479 {
480 	struct s_smc *smc = netdev_priv(dev);
481 	int err;
482 
483 	pr_debug("entering skfp_open\n");
484 	/* Register IRQ - support shared interrupts by passing device ptr */
485 	err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
486 			  dev->name, dev);
487 	if (err)
488 		return err;
489 
490 	/*
491 	 * Set current address to factory MAC address
492 	 *
493 	 * Note: We've already done this step in skfp_driver_init.
494 	 *       However, it's possible that a user has set a node
495 	 *               address override, then closed and reopened the
496 	 *               adapter.  Unless we reset the device address field
497 	 *               now, we'll continue to use the existing modified
498 	 *               address.
499 	 */
500 	read_address(smc, NULL);
501 	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
502 
503 	init_smt(smc, NULL);
504 	smt_online(smc, 1);
505 	STI_FBI();
506 
507 	/* Clear local multicast address tables */
508 	mac_clear_multicast(smc);
509 
510 	/* Disable promiscuous filter settings */
511 	mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
512 
513 	netif_start_queue(dev);
514 	return 0;
515 }				// skfp_open
516 
517 
518 /*
519  * ==============
520  * = skfp_close =
521  * ==============
522  *
523  * Overview:
524  *   Closes the device/module.
525  *
526  * Returns:
527  *   Condition code
528  *
529  * Arguments:
530  *   dev - pointer to device information
531  *
532  * Functional Description:
533  *   This routine closes the adapter and brings it to a safe state.
534  *   The interrupt service routine is deregistered with the OS.
535  *   The adapter can be opened again with another call to skfp_open().
536  *
537  * Return Codes:
538  *   Always returns 0.
539  *
540  * Assumptions:
541  *   No further requests for this adapter are made after this routine is
542  *   called.  skfp_open() can be called to reset and reinitialize the
543  *   adapter.
544  */
545 static int skfp_close(struct net_device *dev)
546 {
547 	struct s_smc *smc = netdev_priv(dev);
548 	skfddi_priv *bp = &smc->os;
549 
550 	CLI_FBI();
551 	smt_reset_defaults(smc, 1);
552 	card_stop(smc);
553 	mac_drv_clear_tx_queue(smc);
554 	mac_drv_clear_rx_queue(smc);
555 
556 	netif_stop_queue(dev);
557 	/* Deregister (free) IRQ */
558 	free_irq(dev->irq, dev);
559 
560 	skb_queue_purge(&bp->SendSkbQueue);
561 	bp->QueueSkb = MAX_TX_QUEUE_LEN;
562 
563 	return 0;
564 }				// skfp_close
565 
566 
567 /*
568  * ==================
569  * = skfp_interrupt =
570  * ==================
571  *
572  * Overview:
573  *   Interrupt processing routine
574  *
575  * Returns:
576  *   None
577  *
578  * Arguments:
579  *   irq        - interrupt vector
580  *   dev_id     - pointer to device information
581  *
582  * Functional Description:
583  *   This routine calls the interrupt processing routine for this adapter.  It
584  *   disables and reenables adapter interrupts, as appropriate.  We can support
585  *   shared interrupts since the incoming dev_id pointer provides our device
586  *   structure context. All the real work is done in the hardware module.
587  *
588  * Return Codes:
589  *   None
590  *
591  * Assumptions:
592  *   The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
593  *   on Intel-based systems) is done by the operating system outside this
594  *   routine.
595  *
596  *       System interrupts are enabled through this call.
597  *
598  * Side Effects:
599  *   Interrupts are disabled, then reenabled at the adapter.
600  */
601 
602 static irqreturn_t skfp_interrupt(int irq, void *dev_id)
603 {
604 	struct net_device *dev = dev_id;
605 	struct s_smc *smc;	/* private board structure pointer */
606 	skfddi_priv *bp;
607 
608 	smc = netdev_priv(dev);
609 	bp = &smc->os;
610 
611 	// IRQs enabled or disabled ?
612 	if (inpd(ADDR(B0_IMSK)) == 0) {
613 		// IRQs are disabled: must be shared interrupt
614 		return IRQ_NONE;
615 	}
616 	// Note: At this point, IRQs are enabled.
617 	if ((inpd(ISR_A) & smc->hw.is_imask) == 0) {	// IRQ?
618 		// Adapter did not issue an IRQ: must be shared interrupt
619 		return IRQ_NONE;
620 	}
621 	CLI_FBI();		// Disable IRQs from our adapter.
622 	spin_lock(&bp->DriverLock);
623 
624 	// Call interrupt handler in hardware module (HWM).
625 	fddi_isr(smc);
626 
627 	if (smc->os.ResetRequested) {
628 		ResetAdapter(smc);
629 		smc->os.ResetRequested = FALSE;
630 	}
631 	spin_unlock(&bp->DriverLock);
632 	STI_FBI();		// Enable IRQs from our adapter.
633 
634 	return IRQ_HANDLED;
635 }				// skfp_interrupt
636 
637 
638 /*
639  * ======================
640  * = skfp_ctl_get_stats =
641  * ======================
642  *
643  * Overview:
644  *   Get statistics for FDDI adapter
645  *
646  * Returns:
647  *   Pointer to FDDI statistics structure
648  *
649  * Arguments:
650  *   dev - pointer to device information
651  *
652  * Functional Description:
653  *   Gets current MIB objects from adapter, then
654  *   returns FDDI statistics structure as defined
655  *   in if_fddi.h.
656  *
657  *   Note: Since the FDDI statistics structure is
658  *   still new and the device structure doesn't
659  *   have an FDDI-specific get statistics handler,
660  *   we'll return the FDDI statistics structure as
661  *   a pointer to an Ethernet statistics structure.
662  *   That way, at least the first part of the statistics
663  *   structure can be decoded properly.
664  *   We'll have to pay attention to this routine as the
665  *   device structure becomes more mature and LAN media
666  *   independent.
667  *
668  */
669 static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
670 {
671 	struct s_smc *bp = netdev_priv(dev);
672 
673 	/* Fill the bp->stats structure with driver-maintained counters */
674 
675 	bp->os.MacStat.port_bs_flag[0] = 0x1234;
676 	bp->os.MacStat.port_bs_flag[1] = 0x5678;
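	/* The two port_bs_flag values above are placeholders; the full FDDI
	 * MIB statistics are not filled in yet (see the #if 0 block below). */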
677 // goos: need to fill out fddi statistic
678 #if 0
679 	/* Get FDDI SMT MIB objects */
680 
681 /* Fill the bp->stats structure with the SMT MIB object values */
682 
683 	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
684 	bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
685 	bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
686 	bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
687 	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
688 	bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
689 	bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
690 	bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
691 	bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
692 	bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
693 	bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
694 	bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
695 	bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
696 	bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
697 	bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
698 	bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
699 	bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
700 	bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
701 	bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
702 	bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
703 	bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
704 	bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
705 	bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
706 	bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
707 	bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
708 	bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
709 	bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
710 	bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
711 	bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
712 	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
713 	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
714 	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
715 	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
716 	bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
717 	bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
718 	bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
719 	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
720 	bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
721 	bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
722 	bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
723 	bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
724 	bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
725 	bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
726 	bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
727 	bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
728 	bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
729 	bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
730 	bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
731 	bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
732 	bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
733 	bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
734 	bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
735 	bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
736 	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
737 	bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
738 	bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
739 	bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
740 	bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
741 	bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
742 	bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
743 	bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
744 	bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
745 	bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
746 	bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
747 	memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
748 	memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
749 	bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
750 	bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
751 	bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
752 	bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
753 	bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
754 	bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
755 	bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
756 	bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
757 	bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
758 	bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
759 	bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
760 	bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
761 	bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
762 	bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
763 	bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
764 	bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
765 	bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
766 	bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
767 	bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
768 	bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
769 	bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
770 	bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
771 	bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
772 	bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
773 	bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
774 	bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
775 
776 
777 	/* Fill the bp->stats structure with the FDDI counter values */
778 
779 	bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
780 	bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
781 	bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
782 	bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
783 	bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
784 	bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
785 	bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
786 	bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
787 	bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
788 	bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
789 	bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
790 
791 #endif
792 	return (struct net_device_stats *)&bp->os.MacStat;
793 }				// ctl_get_stat
794 
795 
796 /*
797  * ==============================
798  * = skfp_ctl_set_multicast_list =
799  * ==============================
800  *
801  * Overview:
802  *   Enable/Disable LLC frame promiscuous mode reception
803  *   on the adapter and/or update multicast address table.
804  *
805  * Returns:
806  *   None
807  *
808  * Arguments:
809  *   dev - pointer to device information
810  *
811  * Functional Description:
812  *   This function acquires the driver lock and only calls
813  *   skfp_ctl_set_multicast_list_wo_lock then.
814  *   This routine follows a fairly simple algorithm for setting the
815  *   adapter filters and CAM:
816  *
817  *      if IFF_PROMISC flag is set
818  *              enable promiscuous mode
819  *      else
820  *              disable promiscuous mode
821  *              if number of multicast addresses <= max. multicast number
822  *                      add mc addresses to adapter table
823  *              else
824  *                      enable promiscuous mode
825  *              update adapter filters
826  *
827  * Assumptions:
828  *   Multicast addresses are presented in canonical (LSB) format.
829  *
830  * Side Effects:
831  *   On-board adapter filters are updated.
832  */
833 static void skfp_ctl_set_multicast_list(struct net_device *dev)
834 {
835 	struct s_smc *smc = netdev_priv(dev);
836 	skfddi_priv *bp = &smc->os;
837 	unsigned long Flags;
838 
839 	spin_lock_irqsave(&bp->DriverLock, Flags);
840 	skfp_ctl_set_multicast_list_wo_lock(dev);
841 	spin_unlock_irqrestore(&bp->DriverLock, Flags);
842 }				// skfp_ctl_set_multicast_list
843 
844 
845 
846 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
847 {
848 	struct s_smc *smc = netdev_priv(dev);
849 	struct netdev_hw_addr *ha;
850 
851 	/* Enable promiscuous mode, if necessary */
852 	if (dev->flags & IFF_PROMISC) {
853 		mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
854 		pr_debug("PROMISCUOUS MODE ENABLED\n");
855 	}
856 	/* Else, update multicast address table */
857 	else {
858 		mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
859 		pr_debug("PROMISCUOUS MODE DISABLED\n");
860 
861 		// Reset all MC addresses
862 		mac_clear_multicast(smc);
863 		mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
864 
865 		if (dev->flags & IFF_ALLMULTI) {
866 			mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
867 			pr_debug("ENABLE ALL MC ADDRESSES\n");
868 		} else if (!netdev_mc_empty(dev)) {
869 			if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
870 				/* use exact filtering */
871 
872 				// point to first multicast addr
873 				netdev_for_each_mc_addr(ha, dev) {
874 					mac_add_multicast(smc,
875 						(struct fddi_addr *)ha->addr,
876 						1);
877 
878 					pr_debug("ENABLE MC ADDRESS: %pMF\n",
879 						 ha->addr);
880 				}
881 
882 			} else {	// more MC addresses than HW supports
883 
884 				mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
885 				pr_debug("ENABLE ALL MC ADDRESSES\n");
886 			}
887 		} else {	// no MC addresses
888 
889 			pr_debug("DISABLE ALL MC ADDRESSES\n");
890 		}
891 
892 		/* Update adapter filters */
893 		mac_update_multicast(smc);
894 	}
895 }				// skfp_ctl_set_multicast_list_wo_lock
896 
897 
898 /*
899  * ===========================
900  * = skfp_ctl_set_mac_address =
901  * ===========================
902  *
903  * Overview:
904  *   Set a new MAC address on the adapter and update dev_addr in the device structure.
905  *
906  * Returns:
907  *   Always returns 0.
908  *
909  * Arguments:
910  *   dev  - pointer to device information
911  *   addr - pointer to sockaddr structure containing unicast address to set
912  *
913  * Assumptions:
914  *   The address pointed to by addr->sa_data is a valid unicast
915  *   address and is presented in canonical (LSB) format.
916  */
917 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
918 {
919 	struct s_smc *smc = netdev_priv(dev);
920 	struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
921 	skfddi_priv *bp = &smc->os;
922 	unsigned long Flags;
923 
924 
925 	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
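	/* The new address takes effect through ResetAdapter() below, which
	 * re-runs init_smt() with dev->dev_addr. */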
926 	spin_lock_irqsave(&bp->DriverLock, Flags);
927 	ResetAdapter(smc);
928 	spin_unlock_irqrestore(&bp->DriverLock, Flags);
929 
930 	return 0;		/* always return zero */
931 }				// skfp_ctl_set_mac_address
932 
933 
934 /*
935  * ==============
936  * = skfp_ioctl =
937  * ==============
938  *
939  * Overview:
940  *
941  * Perform IOCTL call functions here. Some are privileged operations and the
942  * caller must hold the CAP_NET_ADMIN capability in those cases.
943  *
944  * Returns:
945  *   status value
946  *   0 - success
947  *   other - failure
948  *
949  * Arguments:
950  *   dev  - pointer to device information
951  *   rq - pointer to ioctl request structure
952  *   cmd - ioctl command code (unused here; the sub-command is read from the request data)
953  *
954  */
955 
956 
957 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
958 {
959 	struct s_smc *smc = netdev_priv(dev);
960 	skfddi_priv *lp = &smc->os;
961 	struct s_skfp_ioctl ioc;
962 	int status = 0;
963 
964 	if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
965 		return -EFAULT;
966 
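	/* The driver-specific sub-command is carried in ioc.cmd; the cmd
	 * argument passed in by the stack is not examined here. */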
967 	switch (ioc.cmd) {
968 	case SKFP_GET_STATS:	/* Get the driver statistics */
969 		ioc.len = sizeof(lp->MacStat);
970 		status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
971 				? -EFAULT : 0;
972 		break;
973 	case SKFP_CLR_STATS:	/* Zero out the driver statistics */
974 		if (!capable(CAP_NET_ADMIN)) {
975 			status = -EPERM;
976 		} else {
977 			memset(&lp->MacStat, 0, sizeof(lp->MacStat));
978 		}
979 		break;
980 	default:
981 		printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
982 		status = -EOPNOTSUPP;
983 
984 	}			// switch
985 
986 	return status;
987 }				// skfp_ioctl
988 
989 
990 /*
991  * =====================
992  * = skfp_send_pkt     =
993  * =====================
994  *
995  * Overview:
996  *   Queues a packet for transmission and tries to transmit it.
997  *
998  * Returns:
999  *   Condition code
1000  *
1001  * Arguments:
1002  *   skb - pointer to sk_buff to queue for transmission
1003  *   dev - pointer to device information
1004  *
1005  * Functional Description:
1006  *   Here we assume that an incoming skb transmit request
1007  *   is contained in a single physically contiguous buffer
1008  *   in which the virtual address of the start of packet
1009  *   (skb->data) can be converted to a physical address
1010  *   by using pci_map_single().
1011  *
1012  *   We have an internal queue for packets we can not send
1013  *   immediately. Packets in this queue can be given to the
1014  *   adapter if transmit buffers are freed.
1015  *
1016  *   We can't free the skb until after it's been DMA'd
1017  *   out by the adapter, so we'll keep it in the driver and
1018  *   return it in mac_drv_tx_complete.
1019  *
1020  * Return Codes:
1021  *   NETDEV_TX_OK   - driver has queued and/or sent the packet
1022  *   NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
1023  *
1024  * Assumptions:
1025  *   The entire packet is stored in one physically
1026  *   contiguous buffer which is not cached and whose
1027  *   32-bit physical address can be determined.
1028  *
1029  *   It's vital that this routine is NOT reentered for the
1030  *   same board and that the OS is not in another section of
1031  *   code (eg. skfp_interrupt) for the same board on a
1032  *   different thread.
1033  *
1034  * Side Effects:
1035  *   None
1036  */
1037 static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
1038 				       struct net_device *dev)
1039 {
1040 	struct s_smc *smc = netdev_priv(dev);
1041 	skfddi_priv *bp = &smc->os;
1042 
1043 	pr_debug("skfp_send_pkt\n");
1044 
1045 	/*
1046 	 * Verify that incoming transmit request is OK
1047 	 *
1048 	 * Note: The packet size check is consistent with other
1049 	 *               Linux device drivers, although the correct packet
1050 	 *               size should be verified before calling the
1051 	 *               transmit routine.
1052 	 */
1053 
1054 	if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
1055 		bp->MacStat.gen.tx_errors++;	/* bump error counter */
1056 		// dequeue packets from xmt queue and send them
1057 		netif_start_queue(dev);
1058 		dev_kfree_skb(skb);
1059 		return NETDEV_TX_OK;	/* return "success" */
1060 	}
1061 	if (bp->QueueSkb == 0) {	// return with tbusy set: queue full
1062 
1063 		netif_stop_queue(dev);
1064 		return NETDEV_TX_BUSY;
1065 	}
1066 	bp->QueueSkb--;
1067 	skb_queue_tail(&bp->SendSkbQueue, skb);
1068 	send_queued_packets(netdev_priv(dev));
1069 	if (bp->QueueSkb == 0) {
1070 		netif_stop_queue(dev);
1071 	}
1072 	return NETDEV_TX_OK;
1073 
1074 }				// skfp_send_pkt
1075 
1076 
1077 /*
1078  * =======================
1079  * = send_queued_packets =
1080  * =======================
1081  *
1082  * Overview:
1083  *   Send packets from the driver queue as long as there are some and
1084  *   transmit resources are available.
1085  *
1086  * Returns:
1087  *   None
1088  *
1089  * Arguments:
1090  *   smc - pointer to smc (adapter) structure
1091  *
1092  * Functional Description:
1093  *   Take a packet from queue if there is any. If not, then we are done.
1094  *   Check if there are resources to send the packet. If not, requeue it
1095  *   and exit.
1096  *   Set packet descriptor flags and give packet to adapter.
1097  *   Check if any send resources can be freed (we do not use the
1098  *   transmit complete interrupt).
1099  */
1100 static void send_queued_packets(struct s_smc *smc)
1101 {
1102 	skfddi_priv *bp = &smc->os;
1103 	struct sk_buff *skb;
1104 	unsigned char fc;
1105 	int queue;
1106 	struct s_smt_fp_txd *txd;	// Current TxD.
1107 	dma_addr_t dma_address;
1108 	unsigned long Flags;
1109 
1110 	int frame_status;	// HWM tx frame status.
1111 
1112 	pr_debug("send queued packets\n");
1113 	for (;;) {
1114 		// send first buffer from queue
1115 		skb = skb_dequeue(&bp->SendSkbQueue);
1116 
1117 		if (!skb) {
1118 			pr_debug("queue empty\n");
1119 			return;
1120 		}		// queue empty !
1121 
1122 		spin_lock_irqsave(&bp->DriverLock, Flags);
1123 		fc = skb->data[0];
1124 		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
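		// The sync bit in the frame control (FC) byte selects the
		// synchronous queue; all other frames go to async queue A0.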
1125 #ifdef ESS
1126 		// Check if the frame may/must be sent as a synchronous frame.
1127 
1128 		if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
1129 			// It's an LLC frame.
1130 			if (!smc->ess.sync_bw_available)
1131 				fc &= ~FC_SYNC_BIT; // No bandwidth available.
1132 
1133 			else {	// Bandwidth is available.
1134 
1135 				if (smc->mib.fddiESSSynchTxMode) {
1136 					// Send as sync. frame.
1137 					fc |= FC_SYNC_BIT;
1138 				}
1139 			}
1140 		}
1141 #endif				// ESS
1142 		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
1143 
1144 		if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
1145 			// Unable to send the frame.
1146 
1147 			if ((frame_status & RING_DOWN) != 0) {
1148 				// Ring is down.
1149 				pr_debug("Tx attempt while ring down.\n");
1150 			} else if ((frame_status & OUT_OF_TXD) != 0) {
1151 				pr_debug("%s: out of TXDs.\n", bp->dev->name);
1152 			} else {
1153 				pr_debug("%s: out of transmit resources.\n",
1154 					bp->dev->name);
1155 			}
1156 
1157 			// Note: We will retry the operation as soon as
1158 			// transmit resources become available.
1159 			skb_queue_head(&bp->SendSkbQueue, skb);
1160 			spin_unlock_irqrestore(&bp->DriverLock, Flags);
1161 			return;	// Packet has been queued.
1162 
1163 		}		// if (unable to send frame)
1164 
1165 		bp->QueueSkb++;	// one packet less in local queue
1166 
1167 		// source address in packet ?
1168 		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
1169 
1170 		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
1171 
1172 		dma_address = pci_map_single(&bp->pdev, skb->data,
1173 					     skb->len, PCI_DMA_TODEVICE);
1174 		if (frame_status & LAN_TX) {
1175 			txd->txd_os.skb = skb;			// save skb
1176 			txd->txd_os.dma_addr = dma_address;	// save dma mapping
1177 		}
1178 		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
1179                       frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
1180 
1181 		if (!(frame_status & LAN_TX)) {		// local only frame
1182 			pci_unmap_single(&bp->pdev, dma_address,
1183 					 skb->len, PCI_DMA_TODEVICE);
1184 			dev_kfree_skb_irq(skb);
1185 		}
1186 		spin_unlock_irqrestore(&bp->DriverLock, Flags);
1187 	}			// for
1188 
1189 	return;			// never reached
1190 
1191 }				// send_queued_packets
1192 
1193 
1194 /************************
1195  *
1196  * CheckSourceAddress
1197  *
1198  * Verify if the source address is set. Insert it if necessary.
1199  *
1200  ************************/
1201 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1202 {
1203 	unsigned char SRBit;
1204 
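	/* FDDI frame layout: FC[1] + DA[6] + SA[6] + ..., so frame[1 + 6] is
	 * the first byte of the source address (canonical, LSB format).
	 * If the source address already appears to be set (anything besides
	 * the routing bit in its first byte, or a non-zero fifth byte),
	 * leave the frame untouched. */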
1205 	if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
1206 
1207 		return;
1208 	if ((unsigned short) frame[1 + 10] != 0)
1209 		return;
1210 	SRBit = frame[1 + 6] & 0x01;
1211 	memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
1212 	frame[8] |= SRBit;
1213 }				// CheckSourceAddress
1214 
1215 
1216 /************************
1217  *
1218  *	ResetAdapter
1219  *
1220  *	Reset the adapter and bring it back to operational mode.
1221  * Args
1222  *	smc - A pointer to the SMT context struct.
1223  * Out
1224  *	Nothing.
1225  *
1226  ************************/
1227 static void ResetAdapter(struct s_smc *smc)
1228 {
1229 
1230 	pr_debug("[fddi: ResetAdapter]\n");
1231 
1232 	// Stop the adapter.
1233 
1234 	card_stop(smc);		// Stop all activity.
1235 
1236 	// Clear the transmit and receive descriptor queues.
1237 	mac_drv_clear_tx_queue(smc);
1238 	mac_drv_clear_rx_queue(smc);
1239 
1240 	// Restart the adapter.
1241 
1242 	smt_reset_defaults(smc, 1);	// Initialize the SMT module.
1243 
1244 	init_smt(smc, (smc->os.dev)->dev_addr);	// Initialize the hardware.
1245 
1246 	smt_online(smc, 1);	// Insert into the ring again.
1247 	STI_FBI();
1248 
1249 	// Restore original receive mode (multicasts, promiscuous, etc.).
1250 	skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
1251 }				// ResetAdapter
1252 
1253 
1254 //--------------- functions called by hardware module ----------------
1255 
1256 /************************
1257  *
1258  *	llc_restart_tx
1259  *
1260  *	The hardware driver calls this routine when the transmit complete
1261  *	interrupt bits (end of frame) for the synchronous or asynchronous
1262  *	queue is set.
1263  *
1264  * NOTE: The hardware driver also calls this function if no packets are queued.
1265  *	The routine must be able to handle this case.
1266  * Args
1267  *	smc - A pointer to the SMT context struct.
1268  * Out
1269  *	Nothing.
1270  *
1271  ************************/
1272 void llc_restart_tx(struct s_smc *smc)
1273 {
1274 	skfddi_priv *bp = &smc->os;
1275 
1276 	pr_debug("[llc_restart_tx]\n");
1277 
1278 	// Try to send queued packets
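	// send_queued_packets() takes DriverLock itself, so release it here;
	// the HWM calls this routine with the lock held (see skfp_interrupt).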
1279 	spin_unlock(&bp->DriverLock);
1280 	send_queued_packets(smc);
1281 	spin_lock(&bp->DriverLock);
1282 	netif_start_queue(bp->dev);// system may send again if it was blocked
1283 
1284 }				// llc_restart_tx
1285 
1286 
1287 /************************
1288  *
1289  *	mac_drv_get_space
1290  *
1291  *	The hardware module calls this function to allocate the memory
1292  *	for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
1293  * Args
1294  *	smc - A pointer to the SMT context struct.
1295  *
1296  *	size - Size of memory in bytes to allocate.
1297  * Out
1298  *	!= 0	A pointer to the virtual address of the allocated memory.
1299  *	== 0	Allocation error.
1300  *
1301  ************************/
1302 void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1303 {
1304 	void *virt;
1305 
1306 	pr_debug("mac_drv_get_space (%d bytes), ", size);
1307 	virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1308 
1309 	if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1310 		printk("Unexpected SMT memory size requested: %d\n", size);
1311 		return NULL;
1312 	}
1313 	smc->os.SharedMemHeap += size;	// Move heap pointer.
1314 
1315 	pr_debug("mac_drv_get_space end\n");
1316 	pr_debug("virt addr: %lx\n", (ulong) virt);
1317 	pr_debug("bus  addr: %lx\n", (ulong)
1318 	       (smc->os.SharedMemDMA +
1319 		((char *) virt - (char *)smc->os.SharedMemAddr)));
1320 	return virt;
1321 }				// mac_drv_get_space
1322 
1323 
1324 /************************
1325  *
1326  *	mac_drv_get_desc_mem
1327  *
1328  *	This function is called by the hardware dependent module.
1329  *	It allocates the memory for the RxD and TxD descriptors.
1330  *
1331  *	This memory must be non-cached, non-movable and non-swappable.
1332  *	This memory should start at a physical page boundary.
1333  * Args
1334  *	smc - A pointer to the SMT context struct.
1335  *
1336  *	size - Size of memory in bytes to allocate.
1337  * Out
1338  *	!= 0	A pointer to the virtual address of the allocated memory.
1339  *	== 0	Allocation error.
1340  *
1341  ************************/
1342 void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1343 {
1344 
1345 	char *virt;
1346 
1347 	pr_debug("mac_drv_get_desc_mem\n");
1348 
1349 	// Descriptor memory must be aligned on 16-byte boundary.
1350 
1351 	virt = mac_drv_get_space(smc, size);
1352 
1353 	size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1354 	size = size % 16;
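	// 'size' now holds the gap (0..15 bytes) up to the next 16-byte
	// boundary; allocate it below so the descriptors start aligned.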
1355 
1356 	pr_debug("Allocate %u bytes alignment gap ", size);
1357 	pr_debug("for descriptor memory.\n");
1358 
1359 	if (!mac_drv_get_space(smc, size)) {
1360 		printk("fddi: Unable to align descriptor memory.\n");
1361 		return NULL;
1362 	}
1363 	return virt + size;
1364 }				// mac_drv_get_desc_mem
1365 
1366 
1367 /************************
1368  *
1369  *	mac_drv_virt2phys
1370  *
1371  *	Get the physical address of a given virtual address.
1372  * Args
1373  *	smc - A pointer to the SMT context struct.
1374  *
1375  *	virt - A (virtual) pointer into our 'shared' memory area.
1376  * Out
1377  *	Physical address of the given virtual address.
1378  *
1379  ************************/
1380 unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1381 {
1382 	return smc->os.SharedMemDMA +
1383 		((char *) virt - (char *)smc->os.SharedMemAddr);
1384 }				// mac_drv_virt2phys
1385 
1386 
1387 /************************
1388  *
1389  *	dma_master
1390  *
1391  *	The HWM calls this function, when the driver leads through a DMA
1392  *	transfer. If the OS-specific module must prepare the system hardware
1393  *	for the DMA transfer, it should do it in this function.
1394  *
1395  *	The hardware module calls this dma_master if it wants to send an SMT
1396  *	frame.  This means that the virt address passed in here is part of
1397  *      the 'shared' memory area.
1398  * Args
1399  *	smc - A pointer to the SMT context struct.
1400  *
1401  *	virt - The virtual address of the data.
1402  *
1403  *	len - The length in bytes of the data.
1404  *
1405  *	flag - Indicates the transmit direction and the buffer type:
1406  *		DMA_RD	(0x01)	system RAM ==> adapter buffer memory
1407  *		DMA_WR	(0x02)	adapter buffer memory ==> system RAM
1408  *		SMT_BUF (0x80)	SMT buffer
1409  *
1410  *	>> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
1411  * Out
1412  *	Returns the physical address for the DMA transfer.
1413  *
1414  ************************/
1415 u_long dma_master(struct s_smc *smc, void *virt, int len, int flag)
1416 {
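	/* The shared area comes from dma_alloc_coherent(), so the bus address
	 * is simply the DMA base plus the offset of virt within the area. */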
1417 	return smc->os.SharedMemDMA +
1418 		((char *) virt - (char *)smc->os.SharedMemAddr);
1419 }				// dma_master
1420 
1421 
1422 /************************
1423  *
1424  *	dma_complete
1425  *
1426  *	The hardware module calls this routine when it has completed a DMA
1427  *	transfer. If the operating system dependent module has set up the DMA
1428  *	channel via dma_master() (e.g. Windows NT or AIX) it should clean up
1429  *	the DMA channel.
1430  * Args
1431  *	smc - A pointer to the SMT context struct.
1432  *
1433  *	descr - A pointer to a TxD or RxD, respectively.
1434  *
1435  *	flag - Indicates the DMA transfer direction / SMT buffer:
1436  *		DMA_RD	(0x01)	system RAM ==> adapter buffer memory
1437  *		DMA_WR	(0x02)	adapter buffer memory ==> system RAM
1438  *		SMT_BUF (0x80)	SMT buffer (managed by HWM)
1439  * Out
1440  *	Nothing.
1441  *
1442  ************************/
1443 void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
1444 {
1445 	/* For TX buffers, there are two cases.  If it is an SMT transmit
1446 	 * buffer, there is nothing to do since we use consistent memory
1447 	 * for the 'shared' memory area.  The other case is for normal
1448 	 * transmit packets given to us by the networking stack, and in
1449 	 * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
1450 	 * below.
1451 	 *
1452 	 * For RX buffers, we have to unmap dynamic PCI DMA mappings here
1453 	 * because the hardware module is about to potentially look at
1454 	 * the contents of the buffer.  If we did not call the PCI DMA
1455 	 * unmap first, the hardware module could read inconsistent data.
1456 	 */
1457 	if (flag & DMA_WR) {
1458 		skfddi_priv *bp = &smc->os;
1459 		volatile struct s_smt_fp_rxd *r = &descr->r;
1460 
1461 		/* If SKB is NULL, we used the local buffer. */
1462 		if (r->rxd_os.skb && r->rxd_os.dma_addr) {
1463 			int MaxFrameSize = bp->MaxFrameSize;
1464 
1465 			pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
1466 					 MaxFrameSize, PCI_DMA_FROMDEVICE);
1467 			r->rxd_os.dma_addr = 0;
1468 		}
1469 	}
1470 }				// dma_complete
1471 
1472 
1473 /************************
1474  *
1475  *	mac_drv_tx_complete
1476  *
1477  *	Transmit of a packet is complete. Release the tx staging buffer.
1478  *
1479  * Args
1480  *	smc - A pointer to the SMT context struct.
1481  *
1482  *	txd - A pointer to the last TxD which is used by the frame.
1483  * Out
1484  *	Returns nothing.
1485  *
1486  ************************/
1487 void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1488 {
1489 	struct sk_buff *skb;
1490 
1491 	pr_debug("entering mac_drv_tx_complete\n");
1492 	// Check if this TxD points to a skb
1493 
1494 	if (!(skb = txd->txd_os.skb)) {
1495 		pr_debug("TXD with no skb assigned.\n");
1496 		return;
1497 	}
1498 	txd->txd_os.skb = NULL;
1499 
1500 	// release the DMA mapping
1501 	pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1502 			 skb->len, PCI_DMA_TODEVICE);
1503 	txd->txd_os.dma_addr = 0;
1504 
1505 	smc->os.MacStat.gen.tx_packets++;	// Count transmitted packets.
1506 	smc->os.MacStat.gen.tx_bytes+=skb->len;	// Count bytes
1507 
1508 	// free the skb
1509 	dev_kfree_skb_irq(skb);
1510 
1511 	pr_debug("leaving mac_drv_tx_complete\n");
1512 }				// mac_drv_tx_complete
1513 
1514 
1515 /************************
1516  *
1517  * dump packets to logfile
1518  *
1519  ************************/
1520 #ifdef DUMPPACKETS
1521 void dump_data(unsigned char *Data, int length)
1522 {
1523 	printk(KERN_INFO "---Packet start---\n");
1524 	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
1525 	printk(KERN_INFO "------------------\n");
1526 }				// dump_data
1527 #else
1528 #define dump_data(data,len)
1529 #endif				// DUMPPACKETS
1530 
1531 /************************
1532  *
1533  *	mac_drv_rx_complete
1534  *
1535  *	The hardware module calls this function if an LLC frame is received
1536  *	in a receive buffer. Also the SMT, NSA, and directed beacon frames
1537  *	from the network will be passed to the LLC layer by this function
1538  *	if passing is enabled.
1539  *
1540  *	mac_drv_rx_complete forwards the frame to the LLC layer if it should
1541  *	be received. It also fills the RxD ring with new receive buffers if
1542  *	some can be queued.
1543  * Args
1544  *	smc - A pointer to the SMT context struct.
1545  *
1546  *	rxd - A pointer to the first RxD which is used by the receive frame.
1547  *
1548  *	frag_count - Count of RxDs used by the received frame.
1549  *
1550  *	len - Frame length.
1551  * Out
1552  *	Nothing.
1553  *
1554  ************************/
1555 void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1556 			 int frag_count, int len)
1557 {
1558 	skfddi_priv *bp = &smc->os;
1559 	struct sk_buff *skb;
1560 	unsigned char *virt, *cp;
1561 	unsigned short ri;
1562 	u_int RifLength;
1563 
1564 	pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
1565 	if (frag_count != 1) {	// This is not allowed to happen.
1566 
1567 		printk("fddi: Multi-fragment receive!\n");
1568 		goto RequeueRxd;	// Re-use the given RXD(s).
1569 
1570 	}
	skb = rxd->rxd_os.skb;
	if (!skb) {
		pr_debug("No skb in rxd\n");
		smc->os.MacStat.gen.rx_errors++;
		goto RequeueRxd;
	}
	virt = skb->data;

	// The DMA mapping was released in dma_complete above.

	dump_data(skb->data, len);

	/*
	 * FDDI Frame format:
	 * +-------+-------+-------+------------+--------+------------+
	 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
	 * +-------+-------+-------+------------+--------+------------+
	 *
	 * FC = Frame Control
	 * DA = Destination Address
	 * SA = Source Address
	 * RIF = Routing Information Field
	 * LLC = Logical Link Control
	 */
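	/*
	 * In terms of buffer offsets this means: FC is byte 0, DA occupies
	 * bytes 1..6 and SA bytes 7..12, so virt[1 + 6] below is the first
	 * byte of the source address; its FDDI_RII bit flags the presence
	 * of a RIF between the MAC header and the LLC header.
	 */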

	// Remove Routing Information Field (RIF), if present.

	if ((virt[1 + 6] & FDDI_RII) == 0)
		RifLength = 0;
	else {
		int n;
// goos: RIF removal has still to be tested
		pr_debug("RIF found\n");
		// Get RIF length from Routing Control (RC) field.
		cp = virt + FDDI_MAC_HDR_LEN;	// Point behind MAC header.

		ri = ntohs(*((__be16 *) cp));
		RifLength = ri & FDDI_RCF_LEN_MASK;
		if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
			printk("fddi: Invalid RIF.\n");
			goto RequeueRxd;	// Discard the frame.

		}
		virt[1 + 6] &= ~FDDI_RII;	// Clear RII bit.
		// Shift the MAC header forward over the RIF so that the LLC
		// header follows it directly. Source and destination regions
		// overlap, so copy backwards, byte by byte.

		virt = cp + RifLength;
		for (n = FDDI_MAC_HDR_LEN; n; n--)
			*--virt = *--cp;
		// adjust skb->data pointer
		skb_pull(skb, RifLength);
		len -= RifLength;
		RifLength = 0;
	}

	// Count statistics.
	smc->os.MacStat.gen.rx_packets++;	// Count indicated receive
						// packets.
	smc->os.MacStat.gen.rx_bytes += len;	// Count bytes.

	// virt points to header again
	if (virt[1] & 0x01) {	// Check group (multicast) bit.

		smc->os.MacStat.gen.multicast++;
	}

	// deliver frame to system
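	// fddi_type_trans() strips the FDDI MAC/LLC header, sets skb->dev
	// and returns the protocol value used by the upper layers.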
	rxd->rxd_os.skb = NULL;
	skb_trim(skb, len);
	skb->protocol = fddi_type_trans(skb, bp->dev);

	netif_rx(skb);

	HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
	return;

      RequeueRxd:
	pr_debug("Rx: re-queue RXD.\n");
	mac_drv_requeue_rxd(smc, rxd, frag_count);
	smc->os.MacStat.gen.rx_errors++;	// Count receive packets
						// not indicated.

}				// mac_drv_rx_complete


/************************
 *
 *	mac_drv_requeue_rxd
 *
 *	The hardware module calls this function to request the OS-specific
 *	module to queue the receive buffer(s) represented by the pointer
 *	to the RxD and the frag_count into the receive queue again. This
 *	buffer was filled with an invalid frame or an SMT frame.
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	rxd - A pointer to the first RxD which is used by the receive frame.
 *
 *	frag_count - Count of RxDs used by the received frame.
 * Out
 *	Nothing.
 *
 ************************/
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count)
{
	volatile struct s_smt_fp_rxd *next_rxd;
	volatile struct s_smt_fp_rxd *src_rxd;
	struct sk_buff *skb;
	int MaxFrameSize;
	unsigned char *v_addr;
	dma_addr_t b_addr;

	if (frag_count != 1)	// This is not allowed to happen.

		printk("fddi: Multi-fragment requeue!\n");

	MaxFrameSize = smc->os.MaxFrameSize;
	src_rxd = rxd;
	for (; frag_count > 0; frag_count--) {
		next_rxd = src_rxd->rxd_next;
		rxd = HWM_GET_CURR_RXD(smc);

		skb = src_rxd->rxd_os.skb;
		if (skb == NULL) {	// this should not happen

			pr_debug("Requeue with no skb in rxd!\n");
			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
			if (skb) {
				// we got a skb
				rxd->rxd_os.skb = skb;
				skb_reserve(skb, 3);
				skb_put(skb, MaxFrameSize);
				v_addr = skb->data;
				b_addr = pci_map_single(&smc->os.pdev,
							v_addr,
							MaxFrameSize,
							PCI_DMA_FROMDEVICE);
				rxd->rxd_os.dma_addr = b_addr;
			} else {
				// no skb available, use local buffer
				pr_debug("Queueing invalid buffer!\n");
				rxd->rxd_os.skb = NULL;
				v_addr = smc->os.LocalRxBuffer;
				b_addr = smc->os.LocalRxBufferDMA;
			}
		} else {
			// we use skb from old rxd
			rxd->rxd_os.skb = skb;
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		}
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);

		src_rxd = next_rxd;
	}
}				// mac_drv_requeue_rxd


/************************
 *
 *	mac_drv_fill_rxd
 *
 *	The hardware module calls this function at initialization time
 *	to fill the RxD ring with receive buffers. It is also called by
 *	mac_drv_rx_complete if rx_free is large enough to queue some new
 *	receive buffers into the RxD ring. mac_drv_fill_rxd queues new
 *	receive buffers as long as enough RxDs and receive buffers are
 *	available.
 * Args
 *	smc - A pointer to the SMT context struct.
 * Out
 *	Nothing.
 *
 ************************/
void mac_drv_fill_rxd(struct s_smc *smc)
{
	int MaxFrameSize;
	unsigned char *v_addr;
	unsigned long b_addr;
	struct sk_buff *skb;
	volatile struct s_smt_fp_rxd *rxd;

	pr_debug("entering mac_drv_fill_rxd\n");

	// Walk through the list of free receive buffers, passing receive
	// buffers to the HWM as long as RXDs are available.

	MaxFrameSize = smc->os.MaxFrameSize;
	// Check if there is any RXD left.
	while (HWM_GET_RX_FREE(smc) > 0) {
		pr_debug(".\n");

		rxd = HWM_GET_CURR_RXD(smc);
		skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
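		/*
		 * The extra 3 bytes and the skb_reserve(skb, 3) below are
		 * presumably there so that the 13-byte FDDI MAC header
		 * (FC + DA + SA) ends on a 16-byte boundary, keeping the
		 * following LLC/network headers reasonably aligned.
		 */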
		if (skb) {
			// we got a skb
			skb_reserve(skb, 3);
			skb_put(skb, MaxFrameSize);
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		} else {
			// no skb available, use local buffer
			// System has run out of buffer memory, but we want to
			// keep the receiver running in hope of better times.
			// Multiple descriptors may point to this local buffer,
			// so data in it must be considered invalid.
			pr_debug("Queueing invalid buffer!\n");
			v_addr = smc->os.LocalRxBuffer;
			b_addr = smc->os.LocalRxBufferDMA;
		}

		rxd->rxd_os.skb = skb;

		// Pass receive buffer to HWM.
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);
	}
	pr_debug("leaving mac_drv_fill_rxd\n");
}				// mac_drv_fill_rxd


/************************
 *
 *	mac_drv_clear_rxd
 *
 *	The hardware module calls this function to release unused
 *	receive buffers.
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	rxd - A pointer to the first RxD which is used by the receive buffer.
 *
 *	frag_count - Count of RxDs used by the receive buffer.
 * Out
 *	Nothing.
 *
 ************************/
void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
		       int frag_count)
{

	struct sk_buff *skb;

	pr_debug("entering mac_drv_clear_rxd\n");

	if (frag_count != 1)	// This is not allowed to happen.

		printk("fddi: Multi-fragment clear!\n");

	for (; frag_count > 0; frag_count--) {
		skb = rxd->rxd_os.skb;
		if (skb != NULL) {
			skfddi_priv *bp = &smc->os;
			int MaxFrameSize = bp->MaxFrameSize;

			pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
					 MaxFrameSize, PCI_DMA_FROMDEVICE);

			dev_kfree_skb(skb);
			rxd->rxd_os.skb = NULL;
		}
		rxd = rxd->rxd_next;	// Next RXD.

	}
}				// mac_drv_clear_rxd


/************************
 *
 *	mac_drv_rx_init
 *
 *	The hardware module calls this routine when an SMT or NSA frame of the
 *	local SMT should be delivered to the LLC layer.
 *
 *	It is necessary to have this function, because there is no other way to
 *	copy the contents of SMT MBufs into receive buffers.
 *
 *	mac_drv_rx_init allocates the required target memory for this frame,
 *	and receives the frame fragment by fragment by calling mac_drv_rx_frag.
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	len - The length (in bytes) of the received frame (FC, DA, SA, Data).
 *
 *	fc - The Frame Control field of the received frame.
 *
 *	look_ahead - A pointer to the lookahead data buffer (may be NULL).
 *
 *	la_len - The length of the lookahead data stored in the lookahead
 *	buffer (may be zero).
 * Out
 *	Always returns zero (0).
 *
 ************************/
int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
		    char *look_ahead, int la_len)
{
	struct sk_buff *skb;

	pr_debug("entering mac_drv_rx_init(len=%d)\n", len);

	// "Received" an SMT or NSA frame of the local SMT.

	if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
		pr_debug("fddi: Discard invalid local SMT frame\n");
		pr_debug("  len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
			 len, la_len, (unsigned long) look_ahead);
		return 0;
	}
	skb = alloc_skb(len + 3, GFP_ATOMIC);
	if (!skb) {
		pr_debug("fddi: Local SMT: skb memory exhausted.\n");
		return 0;
	}
	skb_reserve(skb, 3);
	skb_put(skb, len);
	skb_copy_to_linear_data(skb, look_ahead, len);

	// deliver frame to system
	skb->protocol = fddi_type_trans(skb, smc->os.dev);
	netif_rx(skb);

	return 0;
}				// mac_drv_rx_init


/************************
 *
 *	smt_timer_poll
 *
 *	This routine is called periodically by the SMT module to clean up the
 *	driver.
 *
 *	Return any queued frames back to the upper protocol layers if the ring
 *	is down.
 * Args
 *	smc - A pointer to the SMT context struct.
 * Out
 *	Nothing.
 *
 ************************/
void smt_timer_poll(struct s_smc *smc)
{
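	/* Nothing to do here: this driver has no periodic cleanup work. */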
}				// smt_timer_poll


/************************
 *
 *	ring_status_indication
 *
 *	This function indicates a change of the ring state.
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	status - The current ring status.
 * Out
 *	Nothing.
 *
 ************************/
void ring_status_indication(struct s_smc *smc, u_long status)
{
	pr_debug("ring_status_indication( ");
	if (status & RS_RES15)
		pr_debug("RS_RES15 ");
	if (status & RS_HARDERROR)
		pr_debug("RS_HARDERROR ");
	if (status & RS_SOFTERROR)
		pr_debug("RS_SOFTERROR ");
	if (status & RS_BEACON)
		pr_debug("RS_BEACON ");
	if (status & RS_PATHTEST)
		pr_debug("RS_PATHTEST ");
	if (status & RS_SELFTEST)
		pr_debug("RS_SELFTEST ");
	if (status & RS_RES9)
		pr_debug("RS_RES9 ");
	if (status & RS_DISCONNECT)
		pr_debug("RS_DISCONNECT ");
	if (status & RS_RES7)
		pr_debug("RS_RES7 ");
	if (status & RS_DUPADDR)
		pr_debug("RS_DUPADDR ");
	if (status & RS_NORINGOP)
		pr_debug("RS_NORINGOP ");
	if (status & RS_VERSION)
		pr_debug("RS_VERSION ");
	if (status & RS_STUCKBYPASSS)
		pr_debug("RS_STUCKBYPASSS ");
	if (status & RS_EVENT)
		pr_debug("RS_EVENT ");
	if (status & RS_RINGOPCHANGE)
		pr_debug("RS_RINGOPCHANGE ");
	if (status & RS_RES0)
		pr_debug("RS_RES0 ");
	pr_debug(")\n");
}				// ring_status_indication


/************************
 *
 *	smt_get_time
 *
 *	Gets the current time from the system.
 * Args
 *	None.
 * Out
 *	The current time, in units of TICKS_PER_SECOND ticks per second.
 *
 *	TICKS_PER_SECOND is defined in "targetos.h" and must match the time
 *	base of the value returned here; since this implementation returns
 *	jiffies, TICKS_PER_SECOND has to be defined as HZ.
 *
 ************************/
unsigned long smt_get_time(void)
{
	return jiffies;
}				// smt_get_time


/************************
 *
 *	smt_stat_counter
 *
 *	Status counter update (ring_op, fifo full).
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	stat -	= 0: A ring operational change occurred.
 *		= 1: The FORMAC FIFO buffer is full / FIFO overflow.
 * Out
 *	Nothing.
 *
 ************************/
void smt_stat_counter(struct s_smc *smc, int stat)
{
//      BOOLEAN RingIsUp ;

	pr_debug("smt_stat_counter\n");
	switch (stat) {
	case 0:
		pr_debug("Ring operational change.\n");
		break;
	case 1:
		pr_debug("Receive fifo overflow.\n");
		smc->os.MacStat.gen.rx_errors++;
		break;
	default:
		pr_debug("Unknown status (%d).\n", stat);
		break;
	}
}				// smt_stat_counter


/************************
 *
 *	cfm_state_change
 *
 *	Sets CFM state in custom statistics.
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	c_state - Possible values are:
 *
 *		SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
 *		SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
 * Out
 *	Nothing.
 *
 ************************/
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
	char *s;

	switch (c_state) {
	case SC0_ISOLATED:
		s = "SC0_ISOLATED";
		break;
	case SC1_WRAP_A:
		s = "SC1_WRAP_A";
		break;
	case SC2_WRAP_B:
		s = "SC2_WRAP_B";
		break;
	case SC4_THRU_A:
		s = "SC4_THRU_A";
		break;
	case SC5_THRU_B:
		s = "SC5_THRU_B";
		break;
	case SC7_WRAP_S:
		s = "SC7_WRAP_S";
		break;
	case SC9_C_WRAP_A:
		s = "SC9_C_WRAP_A";
		break;
	case SC10_C_WRAP_B:
		s = "SC10_C_WRAP_B";
		break;
	case SC11_C_WRAP_S:
		s = "SC11_C_WRAP_S";
		break;
	default:
		pr_debug("cfm_state_change: unknown %d\n", c_state);
		return;
	}
	pr_debug("cfm_state_change: %s\n", s);
#endif				// DRIVERDEBUG
}				// cfm_state_change


/************************
 *
 *	ecm_state_change
 *
 *	Sets ECM state in custom statistics.
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	e_state - Possible values are:
 *
 *		EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
 *		EC5_INSERT, EC6_CHECK, EC7_DEINSERT
 * Out
 *	Nothing.
 *
 ************************/
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
	char *s;

	switch (e_state) {
	case EC0_OUT:
		s = "EC0_OUT";
		break;
	case EC1_IN:
		s = "EC1_IN";
		break;
	case EC2_TRACE:
		s = "EC2_TRACE";
		break;
	case EC3_LEAVE:
		s = "EC3_LEAVE";
		break;
	case EC4_PATH_TEST:
		s = "EC4_PATH_TEST";
		break;
	case EC5_INSERT:
		s = "EC5_INSERT";
		break;
	case EC6_CHECK:
		s = "EC6_CHECK";
		break;
	case EC7_DEINSERT:
		s = "EC7_DEINSERT";
		break;
	default:
		s = "unknown";
		break;
	}
	pr_debug("ecm_state_change: %s\n", s);
#endif				// DRIVERDEBUG
}				// ecm_state_change


/************************
 *
 *	rmt_state_change
 *
 *	Sets RMT state in custom statistics.
 * Args
 *	smc - A pointer to the SMT context struct.
 *
 *	r_state - Possible values are:
 *
 *		RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
 *		RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
 * Out
 *	Nothing.
 *
 ************************/
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
	char *s;

	switch (r_state) {
	case RM0_ISOLATED:
		s = "RM0_ISOLATED";
		break;
	case RM1_NON_OP:
		s = "RM1_NON_OP - not operational";
		break;
	case RM2_RING_OP:
		s = "RM2_RING_OP - ring operational";
		break;
	case RM3_DETECT:
		s = "RM3_DETECT - detect dupl addresses";
		break;
	case RM4_NON_OP_DUP:
		s = "RM4_NON_OP_DUP - dupl. addr detected";
		break;
	case RM5_RING_OP_DUP:
		s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
		break;
	case RM6_DIRECTED:
		s = "RM6_DIRECTED - sending directed beacons";
		break;
	case RM7_TRACE:
		s = "RM7_TRACE - trace initiated";
		break;
	default:
		s = "unknown";
		break;
	}
	pr_debug("[rmt_state_change: %s]\n", s);
#endif				// DRIVERDEBUG
}				// rmt_state_change


/************************
 *
 *	drv_reset_indication
 *
 *	This function is called by the SMT when it has detected a severe
 *	hardware problem. The driver should perform a reset on the adapter
 *	as soon as possible, but not from within this function.
 * Args
 *	smc - A pointer to the SMT context struct.
 * Out
 *	Nothing.
 *
 ************************/
void drv_reset_indication(struct s_smc *smc)
{
	pr_debug("entering drv_reset_indication\n");

	smc->os.ResetRequested = TRUE;	// Set flag.

}				// drv_reset_indication

static struct pci_driver skfddi_pci_driver = {
	.name		= "skfddi",
	.id_table	= skfddi_pci_tbl,
	.probe		= skfp_init_one,
	.remove		= skfp_remove_one,
};

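/*
 * module_pci_driver() expands to the usual module_init()/module_exit()
 * boilerplate that registers skfddi_pci_driver on module load and
 * unregisters it on unload.
 */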
module_pci_driver(skfddi_pci_driver);