sparc: Make SBUS DMA interfaces take struct device.
[deliverable/linux.git] / drivers / atm / fore200e.c
1 /*
2 A FORE Systems 200E-series driver for ATM on Linux.
3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
4
5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
6
7 This driver simultaneously supports PCA-200E and SBA-200E adapters
8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
39 #include <linux/firmware.h>
40 #include <asm/io.h>
41 #include <asm/string.h>
42 #include <asm/page.h>
43 #include <asm/irq.h>
44 #include <asm/dma.h>
45 #include <asm/byteorder.h>
46 #include <asm/uaccess.h>
47 #include <asm/atomic.h>
48
49 #ifdef CONFIG_SBUS
50 #include <asm/idprom.h>
51 #include <asm/sbus.h>
52 #include <asm/openprom.h>
53 #include <asm/oplib.h>
54 #include <asm/pgtable.h>
55 #endif
56
57 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
58 #define FORE200E_USE_TASKLET
59 #endif
60
61 #if 0 /* enable the debugging code of the buffer supply queues */
62 #define FORE200E_BSQ_DEBUG
63 #endif
64
65 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
66 #define FORE200E_52BYTE_AAL0_SDU
67 #endif
68
69 #include "fore200e.h"
70 #include "suni.h"
71
72 #define FORE200E_VERSION "0.3e"
73
74 #define FORE200E "fore200e: "
75
76 #if 0 /* override .config */
77 #define CONFIG_ATM_FORE200E_DEBUG 1
78 #endif
79 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
80 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
81 printk(FORE200E format, ##args); } while (0)
82 #else
83 #define DPRINTK(level, format, args...) do {} while (0)
84 #endif
85
86
87 #define FORE200E_ALIGN(addr, alignment) \
88 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
89
90 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
91
92 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
93
94 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo))
95
#if 1
/* hard assertion: on failure, log the function, line and failed expression,
   then panic the machine. Kept enabled (#if 1); switch to the #else branch
   to compile all assertions out. */
#define ASSERT(expr)     if (!(expr)) { \
			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
				    __func__, __LINE__, #expr); \
			     panic(FORE200E "%s", __func__); \
			 }
#else
#define ASSERT(expr)     do {} while (0)
#endif
105
106
107 static const struct atmdev_ops fore200e_ops;
108 static const struct fore200e_bus fore200e_bus[];
109
110 static LIST_HEAD(fore200e_boards);
111
112
113 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
114 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
115 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
116
117
/* number of rx buffers provisioned for each { scheme, magnitude } supply queue */
static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_NBR, BUFFER_L1_NBR },
    { BUFFER_S2_NBR, BUFFER_L2_NBR }
};

/* byte size of the rx buffers in each { scheme, magnitude } supply queue */
static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
    { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
};
127
128
129 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
130 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
131 #endif
132
133
#if 0 /* currently unused */
/* map a firmware AAL encoding back to the Linux ATM AAL identifier;
   returns -EINVAL for unknown values. Compiled out, kept for symmetry
   with fore200e_atm2fore_aal() below. */
static int
fore200e_fore2atm_aal(enum fore200e_aal aal)
{
    switch(aal) {
    case FORE200E_AAL0:  return ATM_AAL0;
    case FORE200E_AAL34: return ATM_AAL34;
    case FORE200E_AAL5:  return ATM_AAL5;
    }

    return -EINVAL;
}
#endif
147
148
/* map a Linux ATM AAL identifier onto the firmware's AAL encoding.
 * AAL1 and AAL2 are carried over the AAL5 encoding. Unknown values yield
 * -EINVAL; note the negative value is returned through the enum type, so
 * callers must compare the result as a signed integer. */
static enum fore200e_aal
fore200e_atm2fore_aal(int aal)
{
    switch(aal) {
    case ATM_AAL0:  return FORE200E_AAL0;
    case ATM_AAL34: return FORE200E_AAL34;
    case ATM_AAL1:
    case ATM_AAL2:
    case ATM_AAL5:  return FORE200E_AAL5;
    }

    return -EINVAL;
}
162
163
/* render an IRQ number as a decimal string.
 *
 * Returns a pointer to a static buffer, so the result is only valid until
 * the next call (not reentrant; acceptable for the probe/remove printk
 * paths that use it). The buffer is sized for any 32-bit int, including
 * "-2147483648" plus the terminating NUL (12 bytes); the previous 8-byte
 * buffer could overflow for large or negative values. snprintf bounds the
 * write as a second line of defense. */
static char*
fore200e_irq_itoa(int irq)
{
    static char str[16];
    snprintf(str, sizeof(str), "%d", irq);
    return str;
}
171
172
173 /* allocate and align a chunk of memory intended to hold the data behing exchanged
174 between the driver and the adapter (using streaming DVMA) */
175
176 static int
177 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
178 {
179 unsigned long offset = 0;
180
181 if (alignment <= sizeof(int))
182 alignment = 0;
183
184 chunk->alloc_size = size + alignment;
185 chunk->align_size = size;
186 chunk->direction = direction;
187
188 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
189 if (chunk->alloc_addr == NULL)
190 return -ENOMEM;
191
192 if (alignment > 0)
193 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
194
195 chunk->align_addr = chunk->alloc_addr + offset;
196
197 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
198
199 return 0;
200 }
201
202
/* free a chunk of memory set up by fore200e_chunk_alloc(): tear down the
   streaming DMA mapping, then release the backing kmalloc area.
   NOTE(review): the unmap length is chunk->dma_size, while the allocator
   maps chunk->align_size bytes -- confirm dma_size is set to the mapped
   length before any chunk reaches this function. */

static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);

    kfree(chunk->alloc_addr);
}
212
213
/* busy-wait for roughly 'msecs' milliseconds.
   NOTE: this is a pure spin on jiffies with no cpu_relax()/schedule();
   it is only used around hardware resets, where sleeping may not be safe. */
static void
fore200e_spin(int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    while (time_before(jiffies, timeout));
}
220
221
/* busy-poll a host-memory status word until it reaches 'val', the board
 * raises STATUS_ERROR, or 'msecs' milliseconds elapse.
 *
 * Returns non-zero iff the expected value was observed. The mb() orders
 * this poll after the command write that preceded it. */
static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    mb();
    do {
	/* stop early on success, or as soon as the board reports an error */
	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
	       *addr, val);
    }
#endif

    return ok;
}
244
245
/* busy-poll a memory-mapped register (read through the bus accessor to get
 * the right endianness) until it equals 'val' or 'msecs' milliseconds
 * elapse; returns non-zero on success. Unlike fore200e_poll(), there is no
 * error-status early exit here. */
static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    do {
	if ((ok = (fore200e->bus->read(addr) == val)))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
	       fore200e->bus->read(addr), val);
    }
#endif

    return ok;
}
267
268
269 static void
270 fore200e_free_rx_buf(struct fore200e* fore200e)
271 {
272 int scheme, magn, nbr;
273 struct buffer* buffer;
274
275 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
276 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
277
278 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
279
280 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
281
282 struct chunk* data = &buffer[ nbr ].data;
283
284 if (data->alloc_addr != NULL)
285 fore200e_chunk_free(fore200e, data);
286 }
287 }
288 }
289 }
290 }
291
292
293 static void
294 fore200e_uninit_bs_queue(struct fore200e* fore200e)
295 {
296 int scheme, magn;
297
298 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
299 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
300
301 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
302 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
303
304 if (status->alloc_addr)
305 fore200e->bus->dma_chunk_free(fore200e, status);
306
307 if (rbd_block->alloc_addr)
308 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
309 }
310 }
311 }
312
313
/* cold-start the adapter. When 'diag' is set, wait for the on-board
 * self-test to complete and return -ENODEV if it does not report OK;
 * on success the device state advances to FORE200E_STATE_RESET. */
static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
    int ok;

    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

    /* request a cold start, then pulse the bus-specific reset line */
    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

    fore200e->bus->reset(fore200e);

    if (diag) {
	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
	if (ok == 0) {

	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
	    return -ENODEV;
	}

	printk(FORE200E "device %s self-test passed\n", fore200e->name);

	fore200e->state = FORE200E_STATE_RESET;
    }

    return 0;
}
340
341
/* release a device's resources in reverse order of their allocation.
 * The switch ladder deliberately falls through: starting at the state the
 * probe reached, every earlier initialization step is undone in turn. */
static void
fore200e_shutdown(struct fore200e* fore200e)
{
    printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
	   fore200e->name, fore200e->phys_base,
	   fore200e_irq_itoa(fore200e->irq));

    if (fore200e->state > FORE200E_STATE_RESET) {
	/* first, reset the board to prevent further interrupts or data transfers */
	fore200e_reset(fore200e, 0);
    }

    /* then, release all allocated resources */
    switch(fore200e->state) {

    case FORE200E_STATE_COMPLETE:
	kfree(fore200e->stats);

	/* fall through */
    case FORE200E_STATE_IRQ:
	free_irq(fore200e->irq, fore200e->atm_dev);

	/* fall through */
    case FORE200E_STATE_ALLOC_BUF:
	fore200e_free_rx_buf(fore200e);

	/* fall through */
    case FORE200E_STATE_INIT_BSQ:
	fore200e_uninit_bs_queue(fore200e);

	/* fall through */
    case FORE200E_STATE_INIT_RXQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);

	/* fall through */
    case FORE200E_STATE_INIT_TXQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);

	/* fall through */
    case FORE200E_STATE_INIT_CMDQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);

	/* fall through */
    case FORE200E_STATE_INITIALIZE:
	/* nothing to do for that state */

    case FORE200E_STATE_START_FW:
	/* nothing to do for that state */

    case FORE200E_STATE_RESET:
	/* nothing to do for that state */

    case FORE200E_STATE_MAP:
	fore200e->bus->unmap(fore200e);

	/* fall through */
    case FORE200E_STATE_CONFIGURE:
	/* nothing to do for that state */

    case FORE200E_STATE_REGISTER:
	/* XXX shouldn't we *start* by deregistering the device? */
	atm_dev_deregister(fore200e->atm_dev);

	/* fall through */
    case FORE200E_STATE_BLANK:
	/* nothing to do for that state */
	break;
    }
}
404
405
406 #ifdef CONFIG_PCI
407
/* PCA-200E register/slave-RAM read accessor */
static u32 fore200e_pca_read(volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianess of slave RAM accesses */
    return le32_to_cpu(readl(addr));
}


/* PCA-200E register/slave-RAM write accessor */
static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianess of slave RAM accesses */
    writel(cpu_to_le32(val), addr);
}
422
423
/* create a streaming PCI DMA mapping for a host buffer; returns the bus
   address to hand to the adapter */
static u32
fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
    u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);

    DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d,  --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);

    return dma_addr;
}


/* tear down a streaming PCI DMA mapping; 'size' and 'direction' must match
   the original fore200e_pca_dma_map() call */
static void
fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
	    dma_addr, size, direction);

    pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
444
445
/* make device-written data in a streaming mapping visible to the CPU */
static void
fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}

/* hand a streaming mapping back to the device after CPU writes */
static void
fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
461
462
463 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
464 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
465
466 static int
467 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
468 int size, int nbr, int alignment)
469 {
470 /* returned chunks are page-aligned */
471 chunk->alloc_size = size * nbr;
472 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
473 chunk->alloc_size,
474 &chunk->dma_addr);
475
476 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
477 return -ENOMEM;
478
479 chunk->align_addr = chunk->alloc_addr;
480
481 return 0;
482 }
483
484
/* free a DMA consistent chunk of memory allocated by
   fore200e_pca_dma_chunk_alloc() */

static void
fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
			chunk->alloc_size,
			chunk->alloc_addr,
			chunk->dma_addr);
}
495
496
/* return non-zero if this board has an interrupt pending (used by the ISR
   to filter shared-IRQ invocations) */
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
    /* this is a 1 bit register */
    int irq_posted = readl(fore200e->regs.pca.psr);

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
    }
#endif

    return irq_posted;
}
511
512
/* acknowledge (clear) the board's pending interrupt */
static void
fore200e_pca_irq_ack(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
}


/* pulse the board's reset bit, holding it for ~10 ms */
static void
fore200e_pca_reset(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
    fore200e_spin(10);
    writel(0, fore200e->regs.pca.hcr);
}
527
528
/* ioremap the board's PCI memory window and resolve the PCA-specific
   register addresses within it; advances the state machine to MAP.
   Returns -EFAULT if the window cannot be mapped. */
static int __devinit
fore200e_pca_map(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);

    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);

    if (fore200e->virt_base == NULL) {
	printk(FORE200E "can't map device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    /* gain access to the PCA specific registers  */
    fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}
551
552
553 static void
554 fore200e_pca_unmap(struct fore200e* fore200e)
555 {
556 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
557
558 if (fore200e->virt_base != NULL)
559 iounmap(fore200e->virt_base);
560 }
561
562
/* program the board's PCI configuration: sanity-check the IRQ line, set the
 * master control byte (endian conversion on big-endian hosts, large PCI
 * bursts) and raise the latency timer. Advances the state machine to
 * CONFIGURE; returns -EIO for a bogus IRQ assignment. */
static int __devinit
fore200e_pca_configure(struct fore200e* fore200e)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
    u8              master_ctrl, latency;

    DPRINTK(2, "device %s being configured\n", fore200e->name);

    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
	return -EIO;
    }

    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

    master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
	/* request the PCA board to convert the endianess of slave RAM accesses */
	| PCA200E_CTRL_CONVERT_ENDIAN
#endif
#if 0
        | PCA200E_CTRL_DIS_CACHE_RD
        | PCA200E_CTRL_DIS_WRT_INVAL
        | PCA200E_CTRL_ENA_CONT_REQ_MODE
        | PCA200E_CTRL_2_CACHE_WRT_INVAL
#endif
	| PCA200E_CTRL_LARGE_PCI_BURSTS;

    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
       this may impact the performances of other PCI devices on the same bus, though */
    latency = 192;
    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
602
603
/* fetch the board's PROM data (MAC address, serial number, hw revision)
 * through the firmware command queue.
 *
 * Sequence is order-sensitive: the PROM buffer is DMA-mapped, its bus
 * address written into the command entry, the entry's status set to
 * PENDING, and only then the opcode written -- which triggers execution.
 * Returns -EIO if the command does not complete within the poll window. */
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct prom_opcode      opcode;
    int                     ok;
    u32                     prom_dma;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_PROM;
    opcode.pad    = 0;

    prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);

    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode starts command execution on the board */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
	return -EIO;
    }

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

    /* MAC address is stored as little-endian */
    swap_here(&prom->mac_addr[0]);
    swap_here(&prom->mac_addr[4]);
#endif

    return 0;
}
648
649
/* append the board's PCI bus/slot/function to the /proc output page;
   returns the number of characters written */
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;

    return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}
658
659 #endif /* CONFIG_PCI */
660
661
662 #ifdef CONFIG_SBUS
663
/* SBA-200E register/RAM read accessor */
static u32
fore200e_sba_read(volatile u32 __iomem *addr)
{
    return sbus_readl(addr);
}


/* SBA-200E register/RAM write accessor */
static void
fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
{
    sbus_writel(val, addr);
}
676
677
/* create a streaming SBUS DVMA mapping for a host buffer; returns the bus
   address to hand to the adapter */
static u32
fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
    struct sbus_dev *sdev = fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;
    u32 dma_addr = sbus_map_single(dev, virt_addr, size, direction);

    DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);

    return dma_addr;
}


/* tear down a streaming SBUS DVMA mapping; 'size' and 'direction' must
   match the original fore200e_sba_dma_map() call */
static void
fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    struct sbus_dev *sdev = fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;

    DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
	    dma_addr, size, direction);

    sbus_unmap_single(dev, dma_addr, size, direction);
}
703
704
/* make device-written data in a streaming DVMA mapping visible to the CPU */
static void
fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    struct sbus_dev *sdev = fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;

    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    sbus_dma_sync_single_for_cpu(dev, dma_addr, size, direction);
}

/* hand a streaming DVMA mapping back to the device after CPU writes */
static void
fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    struct sbus_dev *sdev = fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;

    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    sbus_dma_sync_single_for_device(dev, dma_addr, size, direction);
}
726
727
/* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
   (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */

static int
fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
			     int size, int nbr, int alignment)
{
    struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;

    chunk->alloc_size = chunk->align_size = size * nbr;

    /* returned chunks are page-aligned */
    chunk->alloc_addr = sbus_alloc_consistent(dev, chunk->alloc_size,
					      &chunk->dma_addr);

    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;

    /* consistent chunks need no extra alignment pass */
    chunk->align_addr = chunk->alloc_addr;

    return 0;
}
751
752
/* free a DVMA consistent chunk of memory allocated by
   fore200e_sba_dma_chunk_alloc() */

static void
fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev;
    struct device *dev = &sdev->ofdev.dev;

    sbus_free_consistent(dev, chunk->alloc_size,
			 chunk->alloc_addr, chunk->dma_addr);
}
764
765
/* enable interrupt delivery; SBA200E_HCR_STICKY masks the read-back so
   only the sticky control bits are preserved across the write */
static void
fore200e_sba_irq_enable(struct fore200e* fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}


/* return non-zero if this board has an interrupt pending */
static int
fore200e_sba_irq_check(struct fore200e* fore200e)
{
    return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}


/* acknowledge (clear) the board's pending interrupt, preserving the
   sticky control bits */
static void
fore200e_sba_irq_ack(struct fore200e* fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}
787
788
/* pulse the board's reset bit, holding it for ~10 ms */
static void
fore200e_sba_reset(struct fore200e* fore200e)
{
    fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
    fore200e_spin(10);
    fore200e->bus->write(0, fore200e->regs.sba.hcr);
}
796
797
/* map the board's four SBUS register resources (HCR, BSR, ISR, RAM),
 * program the interrupt level and configure 64-bit DVMA bursts when the
 * bus supports them; advances the state machine to MAP.
 * NOTE(review): only the RAM mapping is NULL-checked here -- the three
 * register mappings are used unchecked; confirm that is acceptable. */
static int __init
fore200e_sba_map(struct fore200e* fore200e)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
    unsigned int bursts;

    /* gain access to the SBA specific registers  */
    fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
    fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
    fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
    fore200e->virt_base    = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");

    if (fore200e->virt_base == NULL) {
	printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */

    /* get the supported DVMA burst sizes */
    bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);

    if (sbus_can_dma_64bit(sbus_dev))
	sbus_set_sbus64(sbus_dev, bursts);

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}
828
829
/* undo fore200e_sba_map(): release all four SBUS register mappings */
static void
fore200e_sba_unmap(struct fore200e* fore200e)
{
    sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
    sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
    sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
    sbus_iounmap(fore200e->virt_base,    SBA200E_RAM_LENGTH);
}
838
839
/* no bus-level configuration is needed on SBUS; just advance the state
   machine to CONFIGURE */
static int __init
fore200e_sba_configure(struct fore200e* fore200e)
{
    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
846
847
/* locate the 'index'-th SBA-200E board on the SBUS by PROM name and build
 * its fore200e descriptor (bus ops, IRQ, handle). Returns NULL when no
 * matching board exists or it does not expose exactly 4 registers.
 * NOTE(review): the device name uses "index - 1" while the match condition
 * is "count >= index" -- looks like an off-by-one in the printed name;
 * confirm intended numbering before changing. */
static struct fore200e* __init
fore200e_sba_detect(const struct fore200e_bus* bus, int index)
{
    struct fore200e*          fore200e;
    struct sbus_bus* sbus_bus;
    struct sbus_dev* sbus_dev = NULL;

    unsigned int     count = 0;

    for_each_sbus (sbus_bus) {
	for_each_sbusdev (sbus_dev, sbus_bus) {
	    if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
		if (count >= index)
		    goto found;
		count++;
	    }
	}
    }
    return NULL;

  found:
    if (sbus_dev->num_registers != 4) {
	printk(FORE200E "this %s device has %d instead of 4 registers\n",
	       bus->model_name, sbus_dev->num_registers);
	return NULL;
    }

    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (fore200e == NULL)
	return NULL;

    fore200e->bus     = bus;
    fore200e->bus_dev = sbus_dev;
    fore200e->irq     = sbus_dev->irqs[ 0 ];

    /* phys_base doubles as an opaque device handle on SBUS */
    fore200e->phys_base = (unsigned long)sbus_dev;

    sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);

    return fore200e;
}
889
890
/* read the board's identity from the OpenBoot PROM: MAC address halves,
 * serial number and hardware revision. Returns -EBUSY when a mandatory
 * MAC property is missing; serial/revision reads are best-effort. */
static int __init
fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
    int                       len;

    len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
    if (len < 0)
	return -EBUSY;

    len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
    if (len < 0)
	return -EBUSY;

    prom_getproperty(sbus_dev->prom_node, "serialnumber",
		     (char*)&prom->serial_number, sizeof(prom->serial_number));

    prom_getproperty(sbus_dev->prom_node, "promversion",
		     (char*)&prom->hw_revision, sizeof(prom->hw_revision));

    return 0;
}
913
914
/* append the board's SBUS slot and PROM name to the /proc output page;
   returns the number of characters written */
static int
fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;

    return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
}
922 #endif /* CONFIG_SBUS */
923
924
/* reap completed tx entries from the transmit queue tail.
 *
 * For each completed entry: free the bounce copy of misaligned data, tear
 * down its DMA mapping, then pop or drop the skb depending on whether the
 * vcc is still valid and still the same incarnation that queued the PDU.
 * Statement order is significant (status must be re-armed to FREE only
 * after all per-entry resources are released). */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
    struct host_txq*        txq = &fore200e->host_txq;
    struct host_txq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    if (fore200e->host_txq.txing == 0)
	return;

    for (;;) {

	entry = &txq->host_entry[ txq->tail ];

	/* stop at the first entry the board has not finished with */
	if ((*entry->status & STATUS_COMPLETE) == 0) {
	    break;
	}

	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
		entry, txq->tail, entry->vc_map, entry->skb);

	/* free copy of misaligned data */
	kfree(entry->data);

	/* remove DMA mapping */
	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
				 DMA_TO_DEVICE);

	vc_map = entry->vc_map;

	/* vcc closed since the time the entry was submitted for tx? */
	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
		    fore200e->atm_dev->number);

	    dev_kfree_skb_any(entry->skb);
	}
	else {
	    ASSERT(vc_map->vcc);

	    /* vcc closed then immediately re-opened? */
	    if (vc_map->incarn != entry->incarn) {

		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
		   if the same vcc is immediately re-opened, those pending PDUs must
		   not be popped after the completion of their emission, as they refer
		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
		   would be decremented by the size of the (unrelated) skb, possibly
		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
		   we thus bind the tx entry to the current incarnation of the vcc
		   when the entry is submitted for tx. When the tx later completes,
		   if the incarnation number of the tx entry does not match the one
		   of the vcc, then this implies that the vcc has been closed then re-opened.
		   we thus just drop the skb here. */

		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
			fore200e->atm_dev->number);

		dev_kfree_skb_any(entry->skb);
	    }
	    else {
		vcc = vc_map->vcc;
		ASSERT(vcc);

		/* notify tx completion */
		if (vcc->pop) {
		    vcc->pop(vcc, entry->skb);
		}
		else {
		    dev_kfree_skb_any(entry->skb);
		}
#if 1
		/* race fixed by the above incarnation mechanism, but... */
		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
		}
#endif
		/* check error condition */
		if (*entry->status & STATUS_ERROR)
		    atomic_inc(&vcc->stats->tx_err);
		else
		    atomic_inc(&vcc->stats->tx);
	    }
	}

	/* entry fully reaped: hand it back to the submit path */
	*entry->status = STATUS_FREE;

	fore200e->host_txq.txing--;

	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
    }
}
1020
1021
1022 #ifdef FORE200E_BSQ_DEBUG
/* debug-only consistency audit of a buffer supply queue's free list:
 * flags buffers marked 'supplied' yet sitting in the free list, buffers
 * filed under the wrong scheme/magnitude, out-of-range indices, and a
 * free-list length disagreeing with freebuf_count. Always returns 0; it
 * only reports via printk. 'where' tags the call site in the output. */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
    struct buffer* buffer;
    int count = 0;

    buffer = bsq->freebuf;
    while (buffer) {

	if (buffer->supplied) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
		   where, scheme, magn, buffer->index);
	}

	if (buffer->magn != magn) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
		   where, scheme, magn, buffer->index, buffer->magn);
	}

	if (buffer->scheme != scheme) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
		   where, scheme, magn, buffer->index, buffer->scheme);
	}

	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
		   where, scheme, magn, buffer->index);
	}

	count++;
	buffer = buffer->next;
    }

    if (count != bsq->freebuf_count) {
	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
	       where, scheme, magn, count, bsq->freebuf_count);
    }
    return 0;
}
1061 #endif
1062
1063
/* replenish the adapter's rx buffer supply queues.
 *
 * For every { scheme, magnitude } queue, while at least RBD_BLK_SIZE free
 * buffers remain, fill one rbd block with buffers taken off the free list
 * and hand the block to the board. Statement order matters: freebuf_count
 * is decremented and the entry status set to PENDING before the write of
 * the block's bus address, which triggers the board to consume it. */
static void
fore200e_supply(struct fore200e* fore200e)
{
    int  scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(1, bsq, scheme, magn);
#endif
	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {

		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

		entry = &bsq->host_entry[ bsq->head ];

		for (i = 0; i < RBD_BLK_SIZE; i++) {

		    /* take the first buffer in the free buffer list */
		    buffer = bsq->freebuf;
		    if (!buffer) {
			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
			       scheme, magn, bsq->freebuf_count);
			return;
		    }
		    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
		    if (buffer->supplied)
			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
			       scheme, magn, buffer->index);
		    buffer->supplied = 1;
#endif
		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
		}

		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

		/* decrease accordingly the number of free rx buffers */
		bsq->freebuf_count -= RBD_BLK_SIZE;

		*entry->status = STATUS_PENDING;
		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
	    }
	}
    }
}
1120
1121
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
    /* Reassemble a received PDU from its rx buffer segments into a fresh
     * skb and push it up the ATM stack.
     *
     * Returns 0 on success, -ENOMEM if the skb cannot be allocated or if
     * atm_charge() refuses it (socket rx budget exhausted); the rx_drop
     * statistic is bumped in both failure cases. The rx buffers
     * themselves are NOT released here - the caller returns them to the
     * supply queues via fore200e_collect_rpd().
     */
    struct sk_buff*      skb;
    struct buffer*       buffer;
    struct fore200e_vcc* fore200e_vcc;
    int                  i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
    u32                  cell_header = 0;
#endif

    ASSERT(vcc);

    fore200e_vcc = FORE200E_VCC(vcc);
    ASSERT(fore200e_vcc);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* 52-byte AAL0 SDUs: rebuild the 4-byte ATM cell header (sans HEC)
       from the rpd fields; it is prepended to the payload below */
    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
	              (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
	              (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
	               rpd->atm_header.clp;
	pdu_len = 4;
    }
#endif

    /* compute total PDU length */
    for (i = 0; i < rpd->nseg; i++)
	pdu_len += rpd->rsd[ i ].length;

    skb = alloc_skb(pdu_len, GFP_ATOMIC);
    if (skb == NULL) {
	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    __net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
    if (cell_header) {
	*((u32*)skb_put(skb, 4)) = cell_header;
    }
#endif

    /* reassemble segments */
    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	/* Make device DMA transfer visible to CPU. */
	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);

	memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);

	/* Now let the device get at it again. */
	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
    }

    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

    /* update per-VC rx PDU size statistics */
    if (pdu_len < fore200e_vcc->rx_min_pdu)
	fore200e_vcc->rx_min_pdu = pdu_len;
    if (pdu_len > fore200e_vcc->rx_max_pdu)
	fore200e_vcc->rx_max_pdu = pdu_len;
    fore200e_vcc->rx_pdu++;

    /* push PDU */
    if (atm_charge(vcc, skb->truesize) == 0) {

	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
		vcc->itf, vcc->vpi, vcc->vci);

	dev_kfree_skb_any(skb);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    return 0;
}
1214
1215
1216 static void
1217 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1218 {
1219 struct host_bsq* bsq;
1220 struct buffer* buffer;
1221 int i;
1222
1223 for (i = 0; i < rpd->nseg; i++) {
1224
1225 /* rebuild rx buffer address from rsd handle */
1226 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1227
1228 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1229
1230 #ifdef FORE200E_BSQ_DEBUG
1231 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1232
1233 if (buffer->supplied == 0)
1234 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1235 buffer->scheme, buffer->magn, buffer->index);
1236 buffer->supplied = 0;
1237 #endif
1238
1239 /* re-insert the buffer into the free buffer list */
1240 buffer->next = bsq->freebuf;
1241 bsq->freebuf = buffer;
1242
1243 /* then increment the number of free rx buffers */
1244 bsq->freebuf_count++;
1245 }
1246 }
1247
1248
static void
fore200e_rx_irq(struct fore200e* fore200e)
{
    /* Drain the receive queue: for every completed rx queue entry, push
     * the PDU to its VC (when a ready VC exists), return the rx buffers
     * to their supply queues, re-arm the entry for the cp and refill the
     * buffer supply. Runs with fore200e->q_lock held by the caller
     * (fore200e_irq or fore200e_rx_tasklet).
     */
    struct host_rxq*        rxq = &fore200e->host_rxq;
    struct host_rxq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    for (;;) {

	entry = &rxq->host_entry[ rxq->head ];

	/* no more received PDUs */
	if ((*entry->status & STATUS_COMPLETE) == 0)
	    break;

	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);

	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    /* closed or not-yet-ready VC: the PDU is dropped (buffers
	       are still reclaimed below) */
	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
		    fore200e->atm_dev->number,
		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
	}
	else {
	    vcc = vc_map->vcc;
	    ASSERT(vcc);

	    if ((*entry->status & STATUS_ERROR) == 0) {

		fore200e_push_rpd(fore200e, vcc, entry->rpd);
	    }
	    else {
		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
			fore200e->atm_dev->number,
			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
		atomic_inc(&vcc->stats->rx_err);
	    }
	}

	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);

	/* in all cases, hand the rx buffers back to the free lists */
	fore200e_collect_rpd(fore200e, entry->rpd);

	/* rewrite the rpd address to ack the received PDU */
	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
	*entry->status = STATUS_FREE;

	/* refill the cp's buffer supply queues with the freed buffers */
	fore200e_supply(fore200e);
    }
}
1301
1302
1303 #ifndef FORE200E_USE_TASKLET
static void
fore200e_irq(struct fore200e* fore200e)
{
    /* Inline (non-tasklet) interrupt service: handle rx completions,
     * then tx completions.
     *
     * NOTE(review): the rx and tx passes use two separate q_lock
     * critical sections instead of one - presumably to briefly
     * re-enable interrupts between the two passes and bound irq-off
     * latency; confirm before merging them.
     */
    unsigned long flags;

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_rx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_tx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);
}
1317 #endif
1318
1319
static irqreturn_t
fore200e_interrupt(int irq, void* dev)
{
    /* Top-half interrupt handler (registered with IRQF_SHARED in
     * fore200e_irq_request). Checks whether this adapter actually
     * raised the interrupt, then either schedules the tx/rx tasklets or
     * services the queues inline, and finally acks the interrupt on the
     * board.
     */
    struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);

    if (fore200e->bus->irq_check(fore200e) == 0) {

	/* shared line: the interrupt belongs to some other device */
	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
	return IRQ_NONE;
    }
    DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);

#ifdef FORE200E_USE_TASKLET
    tasklet_schedule(&fore200e->tx_tasklet);
    tasklet_schedule(&fore200e->rx_tasklet);
#else
    fore200e_irq(fore200e);
#endif

    fore200e->bus->irq_ack(fore200e);
    return IRQ_HANDLED;
}
1342
1343
1344 #ifdef FORE200E_USE_TASKLET
1345 static void
1346 fore200e_tx_tasklet(unsigned long data)
1347 {
1348 struct fore200e* fore200e = (struct fore200e*) data;
1349 unsigned long flags;
1350
1351 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1352
1353 spin_lock_irqsave(&fore200e->q_lock, flags);
1354 fore200e_tx_irq(fore200e);
1355 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1356 }
1357
1358
1359 static void
1360 fore200e_rx_tasklet(unsigned long data)
1361 {
1362 struct fore200e* fore200e = (struct fore200e*) data;
1363 unsigned long flags;
1364
1365 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1366
1367 spin_lock_irqsave(&fore200e->q_lock, flags);
1368 fore200e_rx_irq((struct fore200e*) data);
1369 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1370 }
1371 #endif
1372
1373
1374 static int
1375 fore200e_select_scheme(struct atm_vcc* vcc)
1376 {
1377 /* fairly balance the VCs over (identical) buffer schemes */
1378 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1379
1380 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1381 vcc->itf, vcc->vpi, vcc->vci, scheme);
1382
1383 return scheme;
1384 }
1385
1386
static int
fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
{
    /* Issue an activate/deactivate-VC command to the cp over the command
     * queue and poll for its completion.
     *
     * @activate: non-zero to open the VC, zero to close it.
     * @mtu:      max rx SDU size; per the comment below, only meaningful
     *            to the cp for AAL0.
     * Returns 0 on success, -EIO if the cp does not complete the command
     * within the polling budget (400 passed to fore200e_poll).
     */
    struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
    struct activate_opcode   activ_opcode;
    struct deactivate_opcode deactiv_opcode;
    struct vpvc              vpvc;
    int                      ok;
    enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    if (activate) {
	/* choose a buffer scheme for the new VC and remember it */
	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);

	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
	activ_opcode.aal    = aal;
	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
	activ_opcode.pad    = 0;
    }
    else {
	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
	deactiv_opcode.pad    = 0;
    }

    vpvc.vci = vcc->vci;
    vpvc.vpi = vcc->vpi;

    *entry->status = STATUS_PENDING;

    if (activate) {

#ifdef FORE200E_52BYTE_AAL0_SDU
	mtu = 48;
#endif
	/* the MTU is not used by the cp, except in the case of AAL0 */
	fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
	fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
	/* the opcode is written after the operands - presumably this is
	   what arms the command in the cp; confirm against firmware docs */
	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
    }
    else {
	fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
    }

    /* busy-wait for the cp to complete the command */
    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to %s VC %d.%d.%d\n",
	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
	return -EIO;
    }

    DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
	    activate ? "open" : "clos");

    return 0;
}
1448
1449
1450 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1451
1452 static void
1453 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1454 {
1455 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1456
1457 /* compute the data cells to idle cells ratio from the tx PCR */
1458 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1459 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1460 }
1461 else {
1462 /* disable rate control */
1463 rate->data_cells = rate->idle_cells = 0;
1464 }
1465 }
1466
1467
1468 static int
1469 fore200e_open(struct atm_vcc *vcc)
1470 {
1471 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1472 struct fore200e_vcc* fore200e_vcc;
1473 struct fore200e_vc_map* vc_map;
1474 unsigned long flags;
1475 int vci = vcc->vci;
1476 short vpi = vcc->vpi;
1477
1478 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1479 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1480
1481 spin_lock_irqsave(&fore200e->q_lock, flags);
1482
1483 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1484 if (vc_map->vcc) {
1485
1486 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1487
1488 printk(FORE200E "VC %d.%d.%d already in use\n",
1489 fore200e->atm_dev->number, vpi, vci);
1490
1491 return -EINVAL;
1492 }
1493
1494 vc_map->vcc = vcc;
1495
1496 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1497
1498 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1499 if (fore200e_vcc == NULL) {
1500 vc_map->vcc = NULL;
1501 return -ENOMEM;
1502 }
1503
1504 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1505 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1506 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1507 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1508 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1509 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1510 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1511
1512 /* pseudo-CBR bandwidth requested? */
1513 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1514
1515 mutex_lock(&fore200e->rate_mtx);
1516 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1517 mutex_unlock(&fore200e->rate_mtx);
1518
1519 kfree(fore200e_vcc);
1520 vc_map->vcc = NULL;
1521 return -EAGAIN;
1522 }
1523
1524 /* reserve bandwidth */
1525 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1526 mutex_unlock(&fore200e->rate_mtx);
1527 }
1528
1529 vcc->itf = vcc->dev->number;
1530
1531 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1532 set_bit(ATM_VF_ADDR, &vcc->flags);
1533
1534 vcc->dev_data = fore200e_vcc;
1535
1536 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1537
1538 vc_map->vcc = NULL;
1539
1540 clear_bit(ATM_VF_ADDR, &vcc->flags);
1541 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1542
1543 vcc->dev_data = NULL;
1544
1545 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1546
1547 kfree(fore200e_vcc);
1548 return -EINVAL;
1549 }
1550
1551 /* compute rate control parameters */
1552 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1553
1554 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1555 set_bit(ATM_VF_HASQOS, &vcc->flags);
1556
1557 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1558 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1559 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1560 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1561 }
1562
1563 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1564 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1565 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1566
1567 /* new incarnation of the vcc */
1568 vc_map->incarn = ++fore200e->incarn_count;
1569
1570 /* VC unusable before this flag is set */
1571 set_bit(ATM_VF_READY, &vcc->flags);
1572
1573 return 0;
1574 }
1575
1576
static void
fore200e_close(struct atm_vcc* vcc)
{
    /* atmdev_ops close hook: deactivate the VC on the adapter, detach
     * it from the driver's vc map, release any reserved pseudo-CBR
     * bandwidth and free the per-VC state.
     */
    struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
    struct fore200e_vcc*    fore200e_vcc;
    struct fore200e_vc_map* vc_map;
    unsigned long           flags;

    ASSERT(vcc);
    ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
    ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));

    DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));

    /* mark the VC unusable before tearing it down */
    clear_bit(ATM_VF_READY, &vcc->flags);

    fore200e_activate_vcin(fore200e, 0, vcc, 0);

    spin_lock_irqsave(&fore200e->q_lock, flags);

    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);

    /* the vc is no longer considered as "in use" by fore200e_open() */
    vc_map->vcc = NULL;

    vcc->itf = vcc->vci = vcc->vpi = 0;

    /* grab the per-VC state before detaching it from the vcc */
    fore200e_vcc = FORE200E_VCC(vcc);
    vcc->dev_data = NULL;

    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    /* release reserved bandwidth, if any */
    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {

	mutex_lock(&fore200e->rate_mtx);
	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
	mutex_unlock(&fore200e->rate_mtx);

	clear_bit(ATM_VF_HASQOS, &vcc->flags);
    }

    clear_bit(ATM_VF_ADDR, &vcc->flags);
    clear_bit(ATM_VF_PARTIAL,&vcc->flags);

    ASSERT(fore200e_vcc);
    kfree(fore200e_vcc);
}
1625
1626
1627 static int
1628 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1629 {
1630 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1631 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1632 struct fore200e_vc_map* vc_map;
1633 struct host_txq* txq = &fore200e->host_txq;
1634 struct host_txq_entry* entry;
1635 struct tpd* tpd;
1636 struct tpd_haddr tpd_haddr;
1637 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1638 int tx_copy = 0;
1639 int tx_len = skb->len;
1640 u32* cell_header = NULL;
1641 unsigned char* skb_data;
1642 int skb_len;
1643 unsigned char* data;
1644 unsigned long flags;
1645
1646 ASSERT(vcc);
1647 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1648 ASSERT(fore200e);
1649 ASSERT(fore200e_vcc);
1650
1651 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1652 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1653 dev_kfree_skb_any(skb);
1654 return -EINVAL;
1655 }
1656
1657 #ifdef FORE200E_52BYTE_AAL0_SDU
1658 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1659 cell_header = (u32*) skb->data;
1660 skb_data = skb->data + 4; /* skip 4-byte cell header */
1661 skb_len = tx_len = skb->len - 4;
1662
1663 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1664 }
1665 else
1666 #endif
1667 {
1668 skb_data = skb->data;
1669 skb_len = skb->len;
1670 }
1671
1672 if (((unsigned long)skb_data) & 0x3) {
1673
1674 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1675 tx_copy = 1;
1676 tx_len = skb_len;
1677 }
1678
1679 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1680
1681 /* this simply NUKES the PCA board */
1682 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1683 tx_copy = 1;
1684 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1685 }
1686
1687 if (tx_copy) {
1688 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1689 if (data == NULL) {
1690 if (vcc->pop) {
1691 vcc->pop(vcc, skb);
1692 }
1693 else {
1694 dev_kfree_skb_any(skb);
1695 }
1696 return -ENOMEM;
1697 }
1698
1699 memcpy(data, skb_data, skb_len);
1700 if (skb_len < tx_len)
1701 memset(data + skb_len, 0x00, tx_len - skb_len);
1702 }
1703 else {
1704 data = skb_data;
1705 }
1706
1707 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1708 ASSERT(vc_map->vcc == vcc);
1709
1710 retry_here:
1711
1712 spin_lock_irqsave(&fore200e->q_lock, flags);
1713
1714 entry = &txq->host_entry[ txq->head ];
1715
1716 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1717
1718 /* try to free completed tx queue entries */
1719 fore200e_tx_irq(fore200e);
1720
1721 if (*entry->status != STATUS_FREE) {
1722
1723 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1724
1725 /* retry once again? */
1726 if (--retry > 0) {
1727 udelay(50);
1728 goto retry_here;
1729 }
1730
1731 atomic_inc(&vcc->stats->tx_err);
1732
1733 fore200e->tx_sat++;
1734 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1735 fore200e->name, fore200e->cp_queues->heartbeat);
1736 if (vcc->pop) {
1737 vcc->pop(vcc, skb);
1738 }
1739 else {
1740 dev_kfree_skb_any(skb);
1741 }
1742
1743 if (tx_copy)
1744 kfree(data);
1745
1746 return -ENOBUFS;
1747 }
1748 }
1749
1750 entry->incarn = vc_map->incarn;
1751 entry->vc_map = vc_map;
1752 entry->skb = skb;
1753 entry->data = tx_copy ? data : NULL;
1754
1755 tpd = entry->tpd;
1756 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1757 tpd->tsd[ 0 ].length = tx_len;
1758
1759 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1760 txq->txing++;
1761
1762 /* The dma_map call above implies a dma_sync so the device can use it,
1763 * thus no explicit dma_sync call is necessary here.
1764 */
1765
1766 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1767 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1768 tpd->tsd[0].length, skb_len);
1769
1770 if (skb_len < fore200e_vcc->tx_min_pdu)
1771 fore200e_vcc->tx_min_pdu = skb_len;
1772 if (skb_len > fore200e_vcc->tx_max_pdu)
1773 fore200e_vcc->tx_max_pdu = skb_len;
1774 fore200e_vcc->tx_pdu++;
1775
1776 /* set tx rate control information */
1777 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1778 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1779
1780 if (cell_header) {
1781 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1782 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1783 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1784 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1785 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1786 }
1787 else {
1788 /* set the ATM header, common to all cells conveying the PDU */
1789 tpd->atm_header.clp = 0;
1790 tpd->atm_header.plt = 0;
1791 tpd->atm_header.vci = vcc->vci;
1792 tpd->atm_header.vpi = vcc->vpi;
1793 tpd->atm_header.gfc = 0;
1794 }
1795
1796 tpd->spec.length = tx_len;
1797 tpd->spec.nseg = 1;
1798 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1799 tpd->spec.intr = 1;
1800
1801 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1802 tpd_haddr.pad = 0;
1803 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1804
1805 *entry->status = STATUS_PENDING;
1806 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1807
1808 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1809
1810 return 0;
1811 }
1812
1813
static int
fore200e_getstats(struct fore200e* fore200e)
{
    /* Ask the cp to DMA its statistics block into fore200e->stats.
     *
     * The stats buffer is allocated lazily on first use and then kept
     * cached in the device structure. Returns 0 on success, -ENOMEM if
     * the buffer cannot be allocated, -EIO if the cp does not complete
     * the command within the polling budget.
     */
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct stats_opcode     opcode;
    int                     ok;
    u32                     stats_dma_addr;

    if (fore200e->stats == NULL) {
	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
	if (fore200e->stats == NULL)
	    return -ENOMEM;
    }

    /* map the buffer for device-to-host DMA for the duration of the command */
    stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
					    sizeof(struct stats), DMA_FROM_DEVICE);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_STATS;
    opcode.pad    = 0;

    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);

    *entry->status = STATUS_PENDING;

    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    /* unmap before the CPU reads the freshly DMA'd statistics */
    fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}
1856
1857
1858 static int
1859 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1860 {
1861 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1862
1863 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1864 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1865
1866 return -EINVAL;
1867 }
1868
1869
1870 static int
1871 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1872 {
1873 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1874
1875 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1876 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1877
1878 return -EINVAL;
1879 }
1880
1881
1882 #if 0 /* currently unused */
1883 static int
1884 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1885 {
1886 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1887 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1888 struct oc3_opcode opcode;
1889 int ok;
1890 u32 oc3_regs_dma_addr;
1891
1892 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1893
1894 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1895
1896 opcode.opcode = OPCODE_GET_OC3;
1897 opcode.reg = 0;
1898 opcode.value = 0;
1899 opcode.mask = 0;
1900
1901 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1902
1903 *entry->status = STATUS_PENDING;
1904
1905 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1906
1907 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1908
1909 *entry->status = STATUS_FREE;
1910
1911 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1912
1913 if (ok == 0) {
1914 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1915 return -EIO;
1916 }
1917
1918 return 0;
1919 }
1920 #endif
1921
1922
static int
fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
{
    /* Issue a SET_OC3 command to the cp: update the bits selected by
     * @mask of SUNI OC-3 PHY register @reg to @value.
     * Returns 0 on success, -EIO if the cp does not complete the
     * command within the polling budget.
     */
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode       opcode;
    int                     ok;

    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_SET_OC3;
    opcode.reg    = reg;
    opcode.value  = value;
    opcode.mask   = mask;

    /* no register snapshot requested for a set operation */
    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
	return -EIO;
    }

    return 0;
}
1957
1958
1959 static int
1960 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1961 {
1962 u32 mct_value, mct_mask;
1963 int error;
1964
1965 if (!capable(CAP_NET_ADMIN))
1966 return -EPERM;
1967
1968 switch (loop_mode) {
1969
1970 case ATM_LM_NONE:
1971 mct_value = 0;
1972 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1973 break;
1974
1975 case ATM_LM_LOC_PHY:
1976 mct_value = mct_mask = SUNI_MCT_DLE;
1977 break;
1978
1979 case ATM_LM_RMT_PHY:
1980 mct_value = mct_mask = SUNI_MCT_LLE;
1981 break;
1982
1983 default:
1984 return -EINVAL;
1985 }
1986
1987 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1988 if (error == 0)
1989 fore200e->loop_mode = loop_mode;
1990
1991 return error;
1992 }
1993
1994
static int
fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
{
    /* SONET_GETSTAT ioctl backend: refresh the cp statistics and copy a
     * struct sonet_stats summary to user space (when @arg is non-NULL).
     * The cp stores its counters big-endian, hence the be32_to_cpu
     * conversions. Returns 0, -EIO (stats fetch failed) or -EFAULT.
     */
    struct sonet_stats tmp;

    if (fore200e_getstats(fore200e) < 0)
	return -EIO;

    tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
    tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
    tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
    tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
    tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
    tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
    tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
    /* cell counters are summed over all supported AALs */
    tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
    tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)  +
	              be32_to_cpu(fore200e->stats->aal34.cells_received) +
	              be32_to_cpu(fore200e->stats->aal5.cells_received);

    if (arg)
	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;

    return 0;
}
2022
2023
2024 static int
2025 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2026 {
2027 struct fore200e* fore200e = FORE200E_DEV(dev);
2028
2029 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2030
2031 switch (cmd) {
2032
2033 case SONET_GETSTAT:
2034 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2035
2036 case SONET_GETDIAG:
2037 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2038
2039 case ATM_SETLOOP:
2040 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2041
2042 case ATM_GETLOOP:
2043 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2044
2045 case ATM_QUERYLOOP:
2046 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2047 }
2048
2049 return -ENOSYS; /* not implemented */
2050 }
2051
2052
2053 static int
2054 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2055 {
2056 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2057 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2058
2059 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2060 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
2061 return -EINVAL;
2062 }
2063
2064 DPRINTK(2, "change_qos %d.%d.%d, "
2065 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2066 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2067 "available_cell_rate = %u",
2068 vcc->itf, vcc->vpi, vcc->vci,
2069 fore200e_traffic_class[ qos->txtp.traffic_class ],
2070 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2071 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2072 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2073 flags, fore200e->available_cell_rate);
2074
2075 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2076
2077 mutex_lock(&fore200e->rate_mtx);
2078 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2079 mutex_unlock(&fore200e->rate_mtx);
2080 return -EAGAIN;
2081 }
2082
2083 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2084 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2085
2086 mutex_unlock(&fore200e->rate_mtx);
2087
2088 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2089
2090 /* update rate control parameters */
2091 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2092
2093 set_bit(ATM_VF_HASQOS, &vcc->flags);
2094
2095 return 0;
2096 }
2097
2098 return -EINVAL;
2099 }
2100
2101
2102 static int __devinit
2103 fore200e_irq_request(struct fore200e* fore200e)
2104 {
2105 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2106
2107 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2108 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2109 return -EBUSY;
2110 }
2111
2112 printk(FORE200E "IRQ %s reserved for device %s\n",
2113 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2114
2115 #ifdef FORE200E_USE_TASKLET
2116 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2117 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2118 #endif
2119
2120 fore200e->state = FORE200E_STATE_IRQ;
2121 return 0;
2122 }
2123
2124
2125 static int __devinit
2126 fore200e_get_esi(struct fore200e* fore200e)
2127 {
2128 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2129 int ok, i;
2130
2131 if (!prom)
2132 return -ENOMEM;
2133
2134 ok = fore200e->bus->prom_read(fore200e, prom);
2135 if (ok < 0) {
2136 kfree(prom);
2137 return -EBUSY;
2138 }
2139
2140 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2141 fore200e->name,
2142 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2143 prom->serial_number & 0xFFFF,
2144 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2145 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2146
2147 for (i = 0; i < ESI_LEN; i++) {
2148 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2149 }
2150
2151 kfree(prom);
2152
2153 return 0;
2154 }
2155
2156
static int __devinit
fore200e_alloc_rx_buf(struct fore200e* fore200e)
{
    /* Allocate the host receive buffers for every (scheme, magnitude)
       combination and thread them onto the per-queue free list. */
    int scheme, magn, nbr, size, i;

    struct host_bsq* bsq;
    struct buffer* buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* buffer count and size per (scheme, magnitude) come from
	       the module-level configuration tables */
	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
	    size = fore200e_rx_buf_size[ scheme ][ magn ];

	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);

	    /* allocate the array of receive buffers */
	    buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);

	    if (buffer == NULL)
		return -ENOMEM;

	    bsq->freebuf = NULL;

	    for (i = 0; i < nbr; i++) {

		buffer[ i ].scheme = scheme;
		buffer[ i ].magn   = magn;
#ifdef FORE200E_BSQ_DEBUG
		buffer[ i ].index    = i;
		buffer[ i ].supplied = 0;
#endif

		/* allocate the receive buffer body */
		if (fore200e_chunk_alloc(fore200e,
					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
					 DMA_FROM_DEVICE) < 0) {

		    /* unwind the bodies allocated so far for THIS queue;
		       NOTE(review): bsq->buffer is left pointing at freed
		       memory and buffers of earlier (scheme, magn) pairs are
		       not released here -- presumably reclaimed by the
		       shutdown path since state stays below ALLOC_BUF; confirm */
		    while (i > 0)
			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
		    kfree(buffer);

		    return -ENOMEM;
		}

		/* insert the buffer into the free buffer list */
		buffer[ i ].next = bsq->freebuf;
		bsq->freebuf = &buffer[ i ];
	    }
	    /* all the buffers are free, initially */
	    bsq->freebuf_count = nbr;

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(3, bsq, scheme, magn);
#endif
	}
    }

    fore200e->state = FORE200E_STATE_ALLOC_BUF;
    return 0;
}
2220
2221
static int __devinit
fore200e_init_bs_queue(struct fore200e* fore200e)
{
    /* Set up one buffer supply queue per (scheme, magnitude) pair:
       DMA-allocate host-side status words and RBD blocks, then cross-link
       each host entry with its cp resident counterpart. */
    int scheme, magn, i;

    struct host_bsq*     bsq;
    struct cp_bsq_entry __iomem * cp_entry;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* allocate and align the array of status words */
	    if (fore200e->bus->dma_chunk_alloc(fore200e,
					       &bsq->status,
					       sizeof(enum status),
					       QUEUE_SIZE_BS,
					       fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	    }

	    /* allocate and align the array of receive buffer descriptors */
	    if (fore200e->bus->dma_chunk_alloc(fore200e,
					       &bsq->rbd_block,
					       sizeof(struct rbd_block),
					       QUEUE_SIZE_BS,
					       fore200e->bus->descr_alignment) < 0) {

		/* release the status chunk allocated just above */
		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
		return -ENOMEM;
	    }

	    /* get the base address of the cp resident buffer supply queue entries */
	    cp_entry = fore200e->virt_base +
		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

	    /* fill the host resident and cp resident buffer supply queue entries */
	    for (i = 0; i < QUEUE_SIZE_BS; i++) {

		bsq->host_entry[ i ].status =
		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
	        bsq->host_entry[ i ].rbd_block =
		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
	        bsq->host_entry[ i ].rbd_block_dma =
		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		/* every slot starts out free; the cp is told where to find
		   each host status word so it can flip it later */
		*bsq->host_entry[ i ].status = STATUS_FREE;

		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	    }
	}
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}
2283
2284
static int __devinit
fore200e_init_rx_queue(struct fore200e* fore200e)
{
    /* Allocate the DMA-able status word and receive PDU descriptor arrays
       for the rx queue and cross-link host and cp resident entries. */
    struct host_rxq*     rxq =  &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_RX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->rpd,
				       sizeof(struct rpd),
				       QUEUE_SIZE_RX,
				       fore200e->bus->descr_alignment) < 0) {

	/* release the status chunk allocated just above */
	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

	rxq->host_entry[ i ].status =
	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
	rxq->host_entry[ i ].rpd =
	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
	rxq->host_entry[ i ].rpd_dma =
	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*rxq->host_entry[ i ].status = STATUS_FREE;

	/* tell the cp where the host status word and rpd of this slot live */
	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
			     &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}
2343
2344
static int __devinit
fore200e_init_tx_queue(struct fore200e* fore200e)
{
    /* Allocate the DMA-able status word and transmit PDU descriptor arrays
       for the tx queue and cross-link host and cp resident entries. */
    struct host_txq*     txq =  &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_TX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->tpd,
				       sizeof(struct tpd),
				       QUEUE_SIZE_TX,
				       fore200e->bus->descr_alignment) < 0) {

	/* release the status chunk allocated just above */
	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i=0; i < QUEUE_SIZE_TX; i++) {

	txq->host_entry[ i ].status =
	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
	txq->host_entry[ i ].tpd =
	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
	txq->host_entry[ i ].tpd_dma  =
	                     FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*txq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

        /* although there is a one-to-one mapping of tx queue entries and tpds,
	   we do not write here the DMA (physical) base address of each tpd into
	   the related cp resident entry, because the cp relies on this write
	   operation to detect that a new pdu has been submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}
2406
2407
static int __devinit
fore200e_init_cmd_queue(struct fore200e* fore200e)
{
    /* Allocate the DMA-able status word array for the command queue and
       cross-link host and cp resident entries; commands themselves are
       written directly into the cp entries at issue time. */
    struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &cmdq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_CMD,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i=0; i < QUEUE_SIZE_CMD; i++) {

	cmdq->host_entry[ i ].status   =
	                      FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*cmdq->host_entry[ i ].status = STATUS_FREE;

	/* tell the cp where the host status word of this slot lives */
	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);
    }

    /* set the head entry of the queue */
    cmdq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_CMDQ;
    return 0;
}
2448
2449
static void __devinit
fore200e_param_bs_queue(struct fore200e* fore200e,
			enum buffer_scheme scheme, enum buffer_magn magn,
			int queue_length, int pool_size, int supply_blksize)
{
    /* Program one buffer supply queue's parameters into the cp resident
       init block.  Note: the buffer size is taken from the module-level
       fore200e_rx_buf_size table, not from a parameter. */
    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];

    fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
    fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
    fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
}
2462
2463
static int __devinit
fore200e_initialize(struct fore200e* fore200e)
{
    /* Program the cp resident init block (queue lengths, buffer schemes),
       then issue OPCODE_INITIALIZE and poll for completion. */
    struct cp_queues __iomem * cpq;
    int               ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    mutex_init(&fore200e->rate_mtx);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    /* bus-specific interrupt enabling, if any (e.g. SBA boards) */
    if (fore200e->bus->irq_enable)
	fore200e->bus->irq_enable(fore200e);

    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);

    /* program every buffer supply queue's parameters */
    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
	    fore200e_param_bs_queue(fore200e, scheme, magn,
				    QUEUE_SIZE_BS,
				    fore200e_rx_buf_nbr[ scheme ][ magn ],
				    RBD_BLK_SIZE);

    /* issue the initialize command */
    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    /* wait up to 3000 polls for the cp to report completion */
    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
    if (ok == 0) {
	printk(FORE200E "device %s initialization failed\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}
2514
2515
2516 static void __devinit
2517 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2518 {
2519 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2520
2521 #if 0
2522 printk("%c", c);
2523 #endif
2524 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2525 }
2526
2527
static int __devinit
fore200e_monitor_getc(struct fore200e* fore200e)
{
    /* Poll the i960 monitor soft UART for up to 50 ms; return the received
       character, or -1 on timeout. */
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
    unsigned long      timeout = jiffies + msecs_to_jiffies(50);
    int                c;

    while (time_before(jiffies, timeout)) {

	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);

	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {

	    /* acknowledge the character so the monitor can send the next one */
	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
#if 0
	    printk("%c", c & 0xFF);
#endif
	    return c & 0xFF;
	}
    }

    return -1;
}
2551
2552
2553 static void __devinit
2554 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2555 {
2556 while (*str) {
2557
2558 /* the i960 monitor doesn't accept any new character if it has something to say */
2559 while (fore200e_monitor_getc(fore200e) >= 0);
2560
2561 fore200e_monitor_putc(fore200e, *str++);
2562 }
2563
2564 while (fore200e_monitor_getc(fore200e) >= 0);
2565 }
2566
2567 #ifdef __LITTLE_ENDIAN
2568 #define FW_EXT ".bin"
2569 #else
2570 #define FW_EXT "_ecd.bin2"
2571 #endif
2572
2573 static int __devinit
2574 fore200e_load_and_start_fw(struct fore200e* fore200e)
2575 {
2576 const struct firmware *firmware;
2577 struct device *device;
2578 struct fw_header *fw_header;
2579 const __le32 *fw_data;
2580 u32 fw_size;
2581 u32 __iomem *load_addr;
2582 char buf[48];
2583 int err = -ENODEV;
2584
2585 if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
2586 device = &((struct pci_dev *) fore200e->bus_dev)->dev;
2587 #ifdef CONFIG_SBUS
2588 else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
2589 device = &((struct sbus_dev *) fore200e->bus_dev)->ofdev.dev;
2590 #endif
2591 else
2592 return err;
2593
2594 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2595 if (request_firmware(&firmware, buf, device) == 1) {
2596 printk(FORE200E "missing %s firmware image\n", fore200e->bus->model_name);
2597 return err;
2598 }
2599
2600 fw_data = (__le32 *) firmware->data;
2601 fw_size = firmware->size / sizeof(u32);
2602 fw_header = (struct fw_header *) firmware->data;
2603 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2604
2605 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2606 fore200e->name, load_addr, fw_size);
2607
2608 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2609 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2610 goto release;
2611 }
2612
2613 for (; fw_size--; fw_data++, load_addr++)
2614 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2615
2616 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2617
2618 #if defined(__sparc_v9__)
2619 /* reported to be required by SBA cards on some sparc64 hosts */
2620 fore200e_spin(100);
2621 #endif
2622
2623 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2624 fore200e_monitor_puts(fore200e, buf);
2625
2626 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2627 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2628 goto release;
2629 }
2630
2631 printk(FORE200E "device %s firmware started\n", fore200e->name);
2632
2633 fore200e->state = FORE200E_STATE_START_FW;
2634 err = 0;
2635
2636 release:
2637 release_firmware(firmware);
2638 return err;
2639 }
2640
2641
2642 static int __devinit
2643 fore200e_register(struct fore200e* fore200e)
2644 {
2645 struct atm_dev* atm_dev;
2646
2647 DPRINTK(2, "device %s being registered\n", fore200e->name);
2648
2649 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2650 NULL);
2651 if (atm_dev == NULL) {
2652 printk(FORE200E "unable to register device %s\n", fore200e->name);
2653 return -ENODEV;
2654 }
2655
2656 atm_dev->dev_data = fore200e;
2657 fore200e->atm_dev = atm_dev;
2658
2659 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2660 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2661
2662 fore200e->available_cell_rate = ATM_OC3_PCR;
2663
2664 fore200e->state = FORE200E_STATE_REGISTER;
2665 return 0;
2666 }
2667
2668
static int __devinit
fore200e_init(struct fore200e* fore200e)
{
    /* Bring a board all the way up.  Each successful step advances
       fore200e->state, so that on failure the caller's fore200e_shutdown()
       can unwind exactly the steps that completed. */
    if (fore200e_register(fore200e) < 0)
	return -ENODEV;

    if (fore200e->bus->configure(fore200e) < 0)
	return -ENODEV;

    if (fore200e->bus->map(fore200e) < 0)
	return -ENODEV;

    if (fore200e_reset(fore200e, 1) < 0)
	return -ENODEV;

    if (fore200e_load_and_start_fw(fore200e) < 0)
	return -ENODEV;

    if (fore200e_initialize(fore200e) < 0)
	return -ENODEV;

    if (fore200e_init_cmd_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_tx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_rx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_bs_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_alloc_rx_buf(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_get_esi(fore200e) < 0)
	return -EIO;

    if (fore200e_irq_request(fore200e) < 0)
	return -EBUSY;

    /* hand the initial set of receive buffers to the cp */
    fore200e_supply(fore200e);

    /* all done, board initialization is now complete */
    fore200e->state = FORE200E_STATE_COMPLETE;
    return 0;
}
2717
2718 #ifdef CONFIG_PCI
2719 static int __devinit
2720 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2721 {
2722 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2723 struct fore200e* fore200e;
2724 int err = 0;
2725 static int index = 0;
2726
2727 if (pci_enable_device(pci_dev)) {
2728 err = -EINVAL;
2729 goto out;
2730 }
2731
2732 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2733 if (fore200e == NULL) {
2734 err = -ENOMEM;
2735 goto out_disable;
2736 }
2737
2738 fore200e->bus = bus;
2739 fore200e->bus_dev = pci_dev;
2740 fore200e->irq = pci_dev->irq;
2741 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2742
2743 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
2744
2745 pci_set_master(pci_dev);
2746
2747 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2748 fore200e->bus->model_name,
2749 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2750
2751 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2752
2753 err = fore200e_init(fore200e);
2754 if (err < 0) {
2755 fore200e_shutdown(fore200e);
2756 goto out_free;
2757 }
2758
2759 ++index;
2760 pci_set_drvdata(pci_dev, fore200e);
2761
2762 out:
2763 return err;
2764
2765 out_free:
2766 kfree(fore200e);
2767 out_disable:
2768 pci_disable_device(pci_dev);
2769 goto out;
2770 }
2771
2772
2773 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2774 {
2775 struct fore200e *fore200e;
2776
2777 fore200e = pci_get_drvdata(pci_dev);
2778
2779 fore200e_shutdown(fore200e);
2780 kfree(fore200e);
2781 pci_disable_device(pci_dev);
2782 }
2783
2784
/* PCI IDs handled by this driver: the FORE PCA-200E, any subsystem IDs;
   driver_data points at the PCA-200E entry of the fore200e_bus[] table */
static struct pci_device_id fore200e_pca_tbl[] = {
    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
      0, 0, (unsigned long) &fore200e_bus[0] },
    { 0, }
};

MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2792
/* PCI driver glue; PCA-200E boards are probed/removed via the PCI core */
static struct pci_driver fore200e_pca_driver = {
	.name =     "fore_200e",
	.probe =    fore200e_pca_detect,
	.remove =   __devexit_p(fore200e_pca_remove_one),
	.id_table = fore200e_pca_tbl,
};
2799 #endif
2800
2801
static int __init
fore200e_module_init(void)
{
    /* Walk every configured bus interface and bring up each board found
       through the legacy bus->detect() hook, then register the PCI driver
       (PCA-200E boards are probed via the PCI core instead). */
    const struct fore200e_bus* bus;
    struct       fore200e*     fore200e;
    int                        index;

    printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");

    /* for each configured bus interface */
    for (bus = fore200e_bus; bus->model_name; bus++) {

	/* detect all boards present on that bus */
	for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {

	    printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
		   fore200e->bus->model_name,
		   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));

	    sprintf(fore200e->name, "%s-%d", bus->model_name, index);

	    if (fore200e_init(fore200e) < 0) {

		/* unwind the partially initialized board and stop probing this bus */
		fore200e_shutdown(fore200e);
		break;
	    }

	    list_add(&fore200e->entry, &fore200e_boards);
	}
    }

#ifdef CONFIG_PCI
    /* succeed as soon as the PCI driver registers, even with no board yet */
    if (!pci_register_driver(&fore200e_pca_driver))
	return 0;
#endif

    /* otherwise succeed only if at least one non-PCI board was found */
    if (!list_empty(&fore200e_boards))
	return 0;

    return -ENODEV;
}
2843
2844
static void __exit
fore200e_module_cleanup(void)
{
    struct fore200e *fore200e, *next;

#ifdef CONFIG_PCI
    /* PCI boards are shut down through fore200e_pca_remove_one() */
    pci_unregister_driver(&fore200e_pca_driver);
#endif

    /* tear down the boards found via the legacy bus walk at init time */
    list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
	fore200e_shutdown(fore200e);
	kfree(fore200e);
    }
    DPRINTK(1, "module being removed\n");
}
2860
2861
2862 static int
2863 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2864 {
2865 struct fore200e* fore200e = FORE200E_DEV(dev);
2866 struct fore200e_vcc* fore200e_vcc;
2867 struct atm_vcc* vcc;
2868 int i, len, left = *pos;
2869 unsigned long flags;
2870
2871 if (!left--) {
2872
2873 if (fore200e_getstats(fore200e) < 0)
2874 return -EIO;
2875
2876 len = sprintf(page,"\n"
2877 " device:\n"
2878 " internal name:\t\t%s\n", fore200e->name);
2879
2880 /* print bus-specific information */
2881 if (fore200e->bus->proc_read)
2882 len += fore200e->bus->proc_read(fore200e, page + len);
2883
2884 len += sprintf(page + len,
2885 " interrupt line:\t\t%s\n"
2886 " physical base address:\t0x%p\n"
2887 " virtual base address:\t0x%p\n"
2888 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2889 " board serial number:\t\t%d\n\n",
2890 fore200e_irq_itoa(fore200e->irq),
2891 (void*)fore200e->phys_base,
2892 fore200e->virt_base,
2893 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2894 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2895 fore200e->esi[4] * 256 + fore200e->esi[5]);
2896
2897 return len;
2898 }
2899
2900 if (!left--)
2901 return sprintf(page,
2902 " free small bufs, scheme 1:\t%d\n"
2903 " free large bufs, scheme 1:\t%d\n"
2904 " free small bufs, scheme 2:\t%d\n"
2905 " free large bufs, scheme 2:\t%d\n",
2906 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2907 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2908 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2909 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2910
2911 if (!left--) {
2912 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2913
2914 len = sprintf(page,"\n\n"
2915 " cell processor:\n"
2916 " heartbeat state:\t\t");
2917
2918 if (hb >> 16 != 0xDEAD)
2919 len += sprintf(page + len, "0x%08x\n", hb);
2920 else
2921 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2922
2923 return len;
2924 }
2925
2926 if (!left--) {
2927 static const char* media_name[] = {
2928 "unshielded twisted pair",
2929 "multimode optical fiber ST",
2930 "multimode optical fiber SC",
2931 "single-mode optical fiber ST",
2932 "single-mode optical fiber SC",
2933 "unknown"
2934 };
2935
2936 static const char* oc3_mode[] = {
2937 "normal operation",
2938 "diagnostic loopback",
2939 "line loopback",
2940 "unknown"
2941 };
2942
2943 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2944 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2945 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2946 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2947 u32 oc3_index;
2948
2949 if ((media_index < 0) || (media_index > 4))
2950 media_index = 5;
2951
2952 switch (fore200e->loop_mode) {
2953 case ATM_LM_NONE: oc3_index = 0;
2954 break;
2955 case ATM_LM_LOC_PHY: oc3_index = 1;
2956 break;
2957 case ATM_LM_RMT_PHY: oc3_index = 2;
2958 break;
2959 default: oc3_index = 3;
2960 }
2961
2962 return sprintf(page,
2963 " firmware release:\t\t%d.%d.%d\n"
2964 " monitor release:\t\t%d.%d\n"
2965 " media type:\t\t\t%s\n"
2966 " OC-3 revision:\t\t0x%x\n"
2967 " OC-3 mode:\t\t\t%s",
2968 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2969 mon960_release >> 16, mon960_release << 16 >> 16,
2970 media_name[ media_index ],
2971 oc3_revision,
2972 oc3_mode[ oc3_index ]);
2973 }
2974
2975 if (!left--) {
2976 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2977
2978 return sprintf(page,
2979 "\n\n"
2980 " monitor:\n"
2981 " version number:\t\t%d\n"
2982 " boot status word:\t\t0x%08x\n",
2983 fore200e->bus->read(&cp_monitor->mon_version),
2984 fore200e->bus->read(&cp_monitor->bstat));
2985 }
2986
2987 if (!left--)
2988 return sprintf(page,
2989 "\n"
2990 " device statistics:\n"
2991 " 4b5b:\n"
2992 " crc_header_errors:\t\t%10u\n"
2993 " framing_errors:\t\t%10u\n",
2994 be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2995 be32_to_cpu(fore200e->stats->phy.framing_errors));
2996
2997 if (!left--)
2998 return sprintf(page, "\n"
2999 " OC-3:\n"
3000 " section_bip8_errors:\t%10u\n"
3001 " path_bip8_errors:\t\t%10u\n"
3002 " line_bip24_errors:\t\t%10u\n"
3003 " line_febe_errors:\t\t%10u\n"
3004 " path_febe_errors:\t\t%10u\n"
3005 " corr_hcs_errors:\t\t%10u\n"
3006 " ucorr_hcs_errors:\t\t%10u\n",
3007 be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
3008 be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
3009 be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
3010 be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
3011 be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
3012 be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
3013 be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
3014
3015 if (!left--)
3016 return sprintf(page,"\n"
3017 " ATM:\t\t\t\t cells\n"
3018 " TX:\t\t\t%10u\n"
3019 " RX:\t\t\t%10u\n"
3020 " vpi out of range:\t\t%10u\n"
3021 " vpi no conn:\t\t%10u\n"
3022 " vci out of range:\t\t%10u\n"
3023 " vci no conn:\t\t%10u\n",
3024 be32_to_cpu(fore200e->stats->atm.cells_transmitted),
3025 be32_to_cpu(fore200e->stats->atm.cells_received),
3026 be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
3027 be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
3028 be32_to_cpu(fore200e->stats->atm.vci_bad_range),
3029 be32_to_cpu(fore200e->stats->atm.vci_no_conn));
3030
3031 if (!left--)
3032 return sprintf(page,"\n"
3033 " AAL0:\t\t\t cells\n"
3034 " TX:\t\t\t%10u\n"
3035 " RX:\t\t\t%10u\n"
3036 " dropped:\t\t\t%10u\n",
3037 be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
3038 be32_to_cpu(fore200e->stats->aal0.cells_received),
3039 be32_to_cpu(fore200e->stats->aal0.cells_dropped));
3040
3041 if (!left--)
3042 return sprintf(page,"\n"
3043 " AAL3/4:\n"
3044 " SAR sublayer:\t\t cells\n"
3045 " TX:\t\t\t%10u\n"
3046 " RX:\t\t\t%10u\n"
3047 " dropped:\t\t\t%10u\n"
3048 " CRC errors:\t\t%10u\n"
3049 " protocol errors:\t\t%10u\n\n"
3050 " CS sublayer:\t\t PDUs\n"
3051 " TX:\t\t\t%10u\n"
3052 " RX:\t\t\t%10u\n"
3053 " dropped:\t\t\t%10u\n"
3054 " protocol errors:\t\t%10u\n",
3055 be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
3056 be32_to_cpu(fore200e->stats->aal34.cells_received),
3057 be32_to_cpu(fore200e->stats->aal34.cells_dropped),
3058 be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
3059 be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
3060 be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
3061 be32_to_cpu(fore200e->stats->aal34.cspdus_received),
3062 be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
3063 be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
3064
3065 if (!left--)
3066 return sprintf(page,"\n"
3067 " AAL5:\n"
3068 " SAR sublayer:\t\t cells\n"
3069 " TX:\t\t\t%10u\n"
3070 " RX:\t\t\t%10u\n"
3071 " dropped:\t\t\t%10u\n"
3072 " congestions:\t\t%10u\n\n"
3073 " CS sublayer:\t\t PDUs\n"
3074 " TX:\t\t\t%10u\n"
3075 " RX:\t\t\t%10u\n"
3076 " dropped:\t\t\t%10u\n"
3077 " CRC errors:\t\t%10u\n"
3078 " protocol errors:\t\t%10u\n",
3079 be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
3080 be32_to_cpu(fore200e->stats->aal5.cells_received),
3081 be32_to_cpu(fore200e->stats->aal5.cells_dropped),
3082 be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
3083 be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
3084 be32_to_cpu(fore200e->stats->aal5.cspdus_received),
3085 be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
3086 be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
3087 be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
3088
3089 if (!left--)
3090 return sprintf(page,"\n"
3091 " AUX:\t\t allocation failures\n"
3092 " small b1:\t\t\t%10u\n"
3093 " large b1:\t\t\t%10u\n"
3094 " small b2:\t\t\t%10u\n"
3095 " large b2:\t\t\t%10u\n"
3096 " RX PDUs:\t\t\t%10u\n"
3097 " TX PDUs:\t\t\t%10lu\n",
3098 be32_to_cpu(fore200e->stats->aux.small_b1_failed),
3099 be32_to_cpu(fore200e->stats->aux.large_b1_failed),
3100 be32_to_cpu(fore200e->stats->aux.small_b2_failed),
3101 be32_to_cpu(fore200e->stats->aux.large_b2_failed),
3102 be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
3103 fore200e->tx_sat);
3104
3105 if (!left--)
3106 return sprintf(page,"\n"
3107 " receive carrier:\t\t\t%s\n",
3108 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3109
3110 if (!left--) {
3111 return sprintf(page,"\n"
3112 " VCCs:\n address VPI VCI AAL "
3113 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3114 }
3115
3116 for (i = 0; i < NBR_CONNECT; i++) {
3117
3118 vcc = fore200e->vc_map[i].vcc;
3119
3120 if (vcc == NULL)
3121 continue;
3122
3123 spin_lock_irqsave(&fore200e->q_lock, flags);
3124
3125 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3126
3127 fore200e_vcc = FORE200E_VCC(vcc);
3128 ASSERT(fore200e_vcc);
3129
3130 len = sprintf(page,
3131 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3132 (u32)(unsigned long)vcc,
3133 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3134 fore200e_vcc->tx_pdu,
3135 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3136 fore200e_vcc->tx_max_pdu,
3137 fore200e_vcc->rx_pdu,
3138 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3139 fore200e_vcc->rx_max_pdu);
3140
3141 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3142 return len;
3143 }
3144
3145 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3146 }
3147
3148 return 0;
3149 }
3150
/* module entry and exit points */
module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);
3153
3154
/* operations exported to the generic Linux ATM layer */
static const struct atmdev_ops fore200e_ops =
{
	.open       = fore200e_open,
	.close      = fore200e_close,
	.ioctl      = fore200e_ioctl,
	.getsockopt = fore200e_getsockopt,
	.setsockopt = fore200e_setsockopt,
	.send       = fore200e_send,
	.change_qos = fore200e_change_qos,
	.proc_read  = fore200e_proc_read,
	.owner      = THIS_MODULE
};
3167
3168
/* Per-bus method tables.  Fields are positional, matching struct
   fore200e_bus: model_name, proc_name, then three alignment constraints
   (presumably descr/buffer/status alignment -- confirm against fore200e.h),
   followed by the register access, DMA, probe and interrupt handlers. */
static const struct fore200e_bus fore200e_bus[] = {
#ifdef CONFIG_PCI
    { "PCA-200E", "pca200e", 32, 4, 32,
      fore200e_pca_read,
      fore200e_pca_write,
      fore200e_pca_dma_map,
      fore200e_pca_dma_unmap,
      fore200e_pca_dma_sync_for_cpu,
      fore200e_pca_dma_sync_for_device,
      fore200e_pca_dma_chunk_alloc,
      fore200e_pca_dma_chunk_free,
      NULL,                             /* no legacy detect: probed via the PCI core */
      fore200e_pca_configure,
      fore200e_pca_map,
      fore200e_pca_reset,
      fore200e_pca_prom_read,
      fore200e_pca_unmap,
      NULL,                             /* no irq_enable hook needed for PCA boards */
      fore200e_pca_irq_check,
      fore200e_pca_irq_ack,
      fore200e_pca_proc_read,
    },
#endif
#ifdef CONFIG_SBUS
    { "SBA-200E", "sba200e", 32, 64, 32,
      fore200e_sba_read,
      fore200e_sba_write,
      fore200e_sba_dma_map,
      fore200e_sba_dma_unmap,
      fore200e_sba_dma_sync_for_cpu,
      fore200e_sba_dma_sync_for_device,
      fore200e_sba_dma_chunk_alloc,
      fore200e_sba_dma_chunk_free,
      fore200e_sba_detect,
      fore200e_sba_configure,
      fore200e_sba_map,
      fore200e_sba_reset,
      fore200e_sba_prom_read,
      fore200e_sba_unmap,
      fore200e_sba_irq_enable,
      fore200e_sba_irq_check,
      fore200e_sba_irq_ack,
      fore200e_sba_proc_read,
    },
#endif
    {}
};
3216
3217 MODULE_LICENSE("GPL");
3218 #ifdef CONFIG_PCI
3219 #ifdef __LITTLE_ENDIAN__
3220 MODULE_FIRMWARE("pca200e.bin");
3221 #else
3222 MODULE_FIRMWARE("pca200e_ecd.bin2");
3223 #endif
3224 #endif /* CONFIG_PCI */
3225 #ifdef CONFIG_SBUS
3226 MODULE_FIRMWARE("sba200e_ecd.bin2");
3227 #endif
This page took 0.142873 seconds and 5 git commands to generate.