Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-serial
[deliverable/linux.git] / drivers / atm / fore200e.c
1 /*
2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
3
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
6
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
8
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27
28 #include <linux/config.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/capability.h>
33 #include <linux/sched.h>
34 #include <linux/interrupt.h>
35 #include <linux/bitops.h>
36 #include <linux/pci.h>
37 #include <linux/module.h>
38 #include <linux/atmdev.h>
39 #include <linux/sonet.h>
40 #include <linux/atm_suni.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/delay.h>
43 #include <asm/io.h>
44 #include <asm/string.h>
45 #include <asm/page.h>
46 #include <asm/irq.h>
47 #include <asm/dma.h>
48 #include <asm/byteorder.h>
49 #include <asm/uaccess.h>
50 #include <asm/atomic.h>
51
52 #ifdef CONFIG_ATM_FORE200E_SBA
53 #include <asm/idprom.h>
54 #include <asm/sbus.h>
55 #include <asm/openprom.h>
56 #include <asm/oplib.h>
57 #include <asm/pgtable.h>
58 #endif
59
60 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
61 #define FORE200E_USE_TASKLET
62 #endif
63
64 #if 0 /* enable the debugging code of the buffer supply queues */
65 #define FORE200E_BSQ_DEBUG
66 #endif
67
68 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
69 #define FORE200E_52BYTE_AAL0_SDU
70 #endif
71
72 #include "fore200e.h"
73 #include "suni.h"
74
75 #define FORE200E_VERSION "0.3e"
76
77 #define FORE200E "fore200e: "
78
79 #if 0 /* override .config */
80 #define CONFIG_ATM_FORE200E_DEBUG 1
81 #endif
82 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
83 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
84 printk(FORE200E format, ##args); } while (0)
85 #else
86 #define DPRINTK(level, format, args...) do {} while (0)
87 #endif
88
89
90 #define FORE200E_ALIGN(addr, alignment) \
91 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
92
93 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
94
95 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
96
/* advance a circular queue index by one, wrapping at 'modulo'.
   the previous form "index = ++(index) % (modulo)" modified 'index' twice
   with no intervening sequence point, which is undefined behavior in C. */
#define FORE200E_NEXT_ENTRY(index, modulo) ((index) = ((index) + 1) % (modulo))
98
#if 1
/* runtime sanity check: on failure, log the failed expression with its
   location and halt the kernel.  wrapped in do { } while (0) so that
   "ASSERT(x);" behaves as a single statement even in unbraced if/else
   bodies, matching the disabled variant below. */
#define ASSERT(expr)     do { if (!(expr)) { \
			          printk(FORE200E "assertion failed! %s[%d]: %s\n", \
				         __FUNCTION__, __LINE__, #expr); \
			          panic(FORE200E "%s", __FUNCTION__); \
		              } } while (0)
#else
#define ASSERT(expr)     do {} while (0)
#endif
108
109
110 static const struct atmdev_ops fore200e_ops;
111 static const struct fore200e_bus fore200e_bus[];
112
113 static LIST_HEAD(fore200e_boards);
114
115
116 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
117 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
118 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
119
120
121 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
122 { BUFFER_S1_NBR, BUFFER_L1_NBR },
123 { BUFFER_S2_NBR, BUFFER_L2_NBR }
124 };
125
126 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
127 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
128 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
129 };
130
131
132 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
133 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
134 #endif
135
136
137 #if 0 /* currently unused */
138 static int
139 fore200e_fore2atm_aal(enum fore200e_aal aal)
140 {
141 switch(aal) {
142 case FORE200E_AAL0: return ATM_AAL0;
143 case FORE200E_AAL34: return ATM_AAL34;
144 case FORE200E_AAL5: return ATM_AAL5;
145 }
146
147 return -EINVAL;
148 }
149 #endif
150
151
152 static enum fore200e_aal
153 fore200e_atm2fore_aal(int aal)
154 {
155 switch(aal) {
156 case ATM_AAL0: return FORE200E_AAL0;
157 case ATM_AAL34: return FORE200E_AAL34;
158 case ATM_AAL1:
159 case ATM_AAL2:
160 case ATM_AAL5: return FORE200E_AAL5;
161 }
162
163 return -EINVAL;
164 }
165
166
/* format an IRQ number as a decimal string for log messages.
   NOTE: returns a pointer to a static buffer, so the result is only valid
   until the next call; acceptable here since it only feeds sequential
   printk() reporting. */
static char*
fore200e_irq_itoa(int irq)
{
    static char str[16];

    /* snprintf() bounds the write: "%d" can need up to 11 characters plus
       the terminating NUL, which did not fit the previous 8-byte buffer */
    snprintf(str, sizeof(str), "%d", irq);
    return str;
}
174
175
176 static void*
177 fore200e_kmalloc(int size, gfp_t flags)
178 {
179 void *chunk = kzalloc(size, flags);
180
181 if (!chunk)
182 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
183
184 return chunk;
185 }
186
187
/* trivial kfree() wrapper, kept for symmetry with fore200e_kmalloc() */
static void
fore200e_kfree(void* chunk)
{
    kfree(chunk);
}
193
194
195 /* allocate and align a chunk of memory intended to hold the data behing exchanged
196 between the driver and the adapter (using streaming DVMA) */
197
static int
fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
{
    unsigned long offset = 0;

    /* kmalloc() already guarantees word alignment, so only honour a
       stricter requirement than sizeof(int) */
    if (alignment <= sizeof(int))
	alignment = 0;

    /* over-allocate so that an aligned sub-area of 'size' bytes always fits */
    chunk->alloc_size = size + alignment;
    chunk->align_size = size;
    chunk->direction  = direction;

    chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
    if (chunk->alloc_addr == NULL)
	return -ENOMEM;

    if (alignment > 0)
	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);

    /* align_addr is the area actually handed to the adapter */
    chunk->align_addr = chunk->alloc_addr + offset;

    /* map the aligned area for streaming DMA; the handle is kept for later unmap */
    chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);

    return 0;
}
223
224
225 /* free a chunk of memory */
226
/* unmap and release a chunk previously set up by fore200e_chunk_alloc() */
static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    /* NOTE(review): this unmaps chunk->dma_size bytes while
       fore200e_chunk_alloc() records the mapped length in align_size —
       confirm struct chunk keeps both fields in sync */
    fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);

    fore200e_kfree(chunk->alloc_addr);
}
234
235
/* busy-wait for roughly 'msecs' milliseconds.
   NOTE(review): spins on jiffies instead of sleeping — presumably because
   it is called from contexts where sleeping is not allowed (board reset
   paths); confirm before converting to msleep() */
static void
fore200e_spin(int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    while (time_before(jiffies, timeout));
}
242
243
/* poll a DMA-updated status word until it equals 'val', the adapter sets
   an error bit, or 'msecs' milliseconds elapse.
   returns non-zero iff the expected value was observed. */
static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int ok;

    mb();  /* ensure prior writes are globally visible before polling */
    do {
	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
	       *addr, val);
    }
#endif

    return ok;
}
266
267
/* poll a device register through the bus read accessor until it equals
   'val' or 'msecs' milliseconds elapse.  unlike fore200e_poll(), no error
   bit is checked here since this polls raw board registers.
   returns non-zero iff the expected value was observed. */
static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int ok;

    do {
	if ((ok = (fore200e->bus->read(addr) == val)))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
	       fore200e->bus->read(addr), val);
    }
#endif

    return ok;
}
289
290
291 static void
292 fore200e_free_rx_buf(struct fore200e* fore200e)
293 {
294 int scheme, magn, nbr;
295 struct buffer* buffer;
296
297 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
298 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
299
300 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
301
302 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
303
304 struct chunk* data = &buffer[ nbr ].data;
305
306 if (data->alloc_addr != NULL)
307 fore200e_chunk_free(fore200e, data);
308 }
309 }
310 }
311 }
312 }
313
314
315 static void
316 fore200e_uninit_bs_queue(struct fore200e* fore200e)
317 {
318 int scheme, magn;
319
320 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
321 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
322
323 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
324 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
325
326 if (status->alloc_addr)
327 fore200e->bus->dma_chunk_free(fore200e, status);
328
329 if (rbd_block->alloc_addr)
330 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
331 }
332 }
333 }
334
335
/* (re)start the adapter: locate the monitor area, request a cold start,
   pulse the board reset, and optionally wait for the on-board self-test
   verdict.  returns 0 on success, -ENODEV if the self-test fails. */
static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
    int ok;

    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

    fore200e->bus->reset(fore200e);

    if (diag) {
	/* firmware flips bstat to BSTAT_SELFTEST_OK when the board is healthy */
	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
	if (ok == 0) {

	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
	    return -ENODEV;
	}

	printk(FORE200E "device %s self-test passed\n", fore200e->name);

	fore200e->state = FORE200E_STATE_RESET;
    }

    return 0;
}
362
363
/* release a device's resources by walking the initialization state machine
   backwards: each case undoes what the corresponding state acquired and
   deliberately FALLS THROUGH to the cleanup of all earlier states */
static void
fore200e_shutdown(struct fore200e* fore200e)
{
    printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
	   fore200e->name, fore200e->phys_base,
	   fore200e_irq_itoa(fore200e->irq));

    if (fore200e->state > FORE200E_STATE_RESET) {
	/* first, reset the board to prevent further interrupts or data transfers */
	fore200e_reset(fore200e, 0);
    }

    /* then, release all allocated resources */
    switch(fore200e->state) {

    case FORE200E_STATE_COMPLETE:
	kfree(fore200e->stats);
	/* fall through */

    case FORE200E_STATE_IRQ:
	free_irq(fore200e->irq, fore200e->atm_dev);
	/* fall through */

    case FORE200E_STATE_ALLOC_BUF:
	fore200e_free_rx_buf(fore200e);
	/* fall through */

    case FORE200E_STATE_INIT_BSQ:
	fore200e_uninit_bs_queue(fore200e);
	/* fall through */

    case FORE200E_STATE_INIT_RXQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
	/* fall through */

    case FORE200E_STATE_INIT_TXQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
	/* fall through */

    case FORE200E_STATE_INIT_CMDQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
	/* fall through */

    case FORE200E_STATE_INITIALIZE:
	/* nothing to do for that state */

    case FORE200E_STATE_START_FW:
	/* nothing to do for that state */

    case FORE200E_STATE_LOAD_FW:
	/* nothing to do for that state */

    case FORE200E_STATE_RESET:
	/* nothing to do for that state */

    case FORE200E_STATE_MAP:
	fore200e->bus->unmap(fore200e);
	/* fall through */

    case FORE200E_STATE_CONFIGURE:
	/* nothing to do for that state */

    case FORE200E_STATE_REGISTER:
	/* XXX shouldn't we *start* by deregistering the device? */
	atm_dev_deregister(fore200e->atm_dev);
	/* fall through */

    case FORE200E_STATE_BLANK:
	/* nothing to do for that state */
	break;
    }
}
429
430
431 #ifdef CONFIG_ATM_FORE200E_PCA
432
/* read a 32-bit word from PCA-200E slave RAM.
   on big-endian hosts the board itself is configured (via
   PCA200E_CTRL_CONVERT_ENDIAN in fore200e_pca_configure()) to convert the
   endianess of slave RAM accesses, so the swap done by readl() must be
   undone here */
static u32 fore200e_pca_read(volatile u32 __iomem *addr)
{
    return le32_to_cpu(readl(addr));
}
439
440
/* write a 32-bit word to PCA-200E slave RAM.
   mirror of fore200e_pca_read(): the board converts the endianess of
   slave RAM accesses on big-endian hosts, so pre-swap to little-endian */
static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
{
    writel(cpu_to_le32(val), addr);
}
447
448
/* map a kernel buffer for streaming PCI DMA; returns the bus address
   handed to the adapter */
static u32
fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
    u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);

    DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);

    return dma_addr;
}
459
460
/* release a streaming PCI DMA mapping created by fore200e_pca_dma_map() */
static void
fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
	    dma_addr, size, direction);

    pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
469
470
/* make device-written data in a streaming mapping visible to the CPU */
static void
fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
478
/* hand a streaming mapping back to the device after CPU writes */
static void
fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
486
487
488 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
489 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
490
491 static int
492 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
493 int size, int nbr, int alignment)
494 {
495 /* returned chunks are page-aligned */
496 chunk->alloc_size = size * nbr;
497 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
498 chunk->alloc_size,
499 &chunk->dma_addr);
500
501 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
502 return -ENOMEM;
503
504 chunk->align_addr = chunk->alloc_addr;
505
506 return 0;
507 }
508
509
510 /* free a DMA consistent chunk of memory */
511
/* release a DMA-consistent chunk allocated by fore200e_pca_dma_chunk_alloc() */
static void
fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
			chunk->alloc_size,
			chunk->alloc_addr,
			chunk->dma_addr);
}
520
521
/* return non-zero if the board has an interrupt pending for us */
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
    /* this is a 1 bit register */
    int irq_posted = readl(fore200e->regs.pca.psr);

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
    /* at debug level 2 only: report the 'FIFO OUT full' condition */
    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
    }
#endif

    return irq_posted;
}
536
537
/* acknowledge a pending interrupt via the host control register */
static void
fore200e_pca_irq_ack(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
}
543
544
/* hard-reset the board: assert the reset bit, hold it for ~10 ms,
   then release it */
static void
fore200e_pca_reset(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
    fore200e_spin(10);
    writel(0, fore200e->regs.pca.hcr);
}
552
553
554 static int __devinit
555 fore200e_pca_map(struct fore200e* fore200e)
556 {
557 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
558
559 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
560
561 if (fore200e->virt_base == NULL) {
562 printk(FORE200E "can't map device %s\n", fore200e->name);
563 return -EFAULT;
564 }
565
566 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
567
568 /* gain access to the PCA specific registers */
569 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
570 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
571 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
572
573 fore200e->state = FORE200E_STATE_MAP;
574 return 0;
575 }
576
577
/* undo fore200e_pca_map(); safe to call even if the mapping never succeeded */
static void
fore200e_pca_unmap(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);

    if (fore200e->virt_base != NULL)
	iounmap(fore200e->virt_base);
}
586
587
/* PCI configuration of the board: sanity-check the IRQ line, tune the
   bus-master control register and raise the PCI latency timer.
   returns 0 on success, -EIO on a bogus IRQ assignment. */
static int __devinit
fore200e_pca_configure(struct fore200e* fore200e)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
    u8              master_ctrl, latency;

    DPRINTK(2, "device %s being configured\n", fore200e->name);

    /* 0 and 0xFF are the classic "no IRQ routed" sentinels */
    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
	return -EIO;
    }

    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

    master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
	/* request the PCA board to convert the endianess of slave RAM accesses */
	| PCA200E_CTRL_CONVERT_ENDIAN
#endif
#if 0
	| PCA200E_CTRL_DIS_CACHE_RD
	| PCA200E_CTRL_DIS_WRT_INVAL
	| PCA200E_CTRL_ENA_CONT_REQ_MODE
	| PCA200E_CTRL_2_CACHE_WRT_INVAL
#endif
	| PCA200E_CTRL_LARGE_PCI_BURSTS;

    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
       this may impact the performances of other PCI devices on the same bus, though */
    latency = 192;
    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
627
628
/* fetch the board PROM (MAC address, serial number, ...) by issuing a
   GET_PROM command through the shared command queue: the PROM buffer is
   DMA-mapped, the command opcode is written last to kick the firmware,
   then the status word is polled for completion.
   returns 0 on success, -EIO on command timeout/error. */
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct prom_opcode      opcode;
    int                     ok;
    u32                     prom_dma;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_PROM;
    opcode.pad    = 0;

    /* map the destination buffer so the adapter can DMA the PROM into it */
    prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);

    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode hands the command over to the firmware */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
	return -EIO;
    }

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

    /* MAC address is stored as little-endian */
    swap_here(&prom->mac_addr[0]);
    swap_here(&prom->mac_addr[4]);
#endif

    return 0;
}
673
674
/* /proc helper: report the PCI location of the board; returns the number
   of characters written into 'page' */
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;

    return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}
683
684 #endif /* CONFIG_ATM_FORE200E_PCA */
685
686
687 #ifdef CONFIG_ATM_FORE200E_SBA
688
/* read a 32-bit word from SBA-200E board memory (SBus accessor) */
static u32
fore200e_sba_read(volatile u32 __iomem *addr)
{
    return sbus_readl(addr);
}
694
695
/* write a 32-bit word to SBA-200E board memory (SBus accessor) */
static void
fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
{
    sbus_writel(val, addr);
}
701
702
703 static u32
704 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
705 {
706 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
707
708 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
709 virt_addr, size, direction, dma_addr);
710
711 return dma_addr;
712 }
713
714
/* release a streaming SBus DVMA mapping created by fore200e_sba_dma_map() */
static void
fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
	    dma_addr, size, direction);

    sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
723
724
/* make device-written data in a streaming DVMA mapping visible to the CPU */
static void
fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
732
/* hand a streaming DVMA mapping back to the device after CPU writes */
static void
fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
}
740
741
742 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
743 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
744
/* allocate a DVMA-consistent chunk of nbr * size bytes; the 'alignment'
   argument needs no handling since returned chunks are page-aligned.
   returns 0 on success, -ENOMEM on failure. */
static int
fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
			     int size, int nbr, int alignment)
{
    chunk->alloc_size = chunk->align_size = size * nbr;

    /* returned chunks are page-aligned */
    chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
					      chunk->alloc_size,
					      &chunk->dma_addr);

    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;

    /* no padding needed: the allocation is already suitably aligned */
    chunk->align_addr = chunk->alloc_addr;

    return 0;
}
763
764
765 /* free a DVMA consistent chunk of memory */
766
/* release a DVMA-consistent chunk allocated by fore200e_sba_dma_chunk_alloc() */
static void
fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
			 chunk->alloc_size,
			 chunk->alloc_addr,
			 chunk->dma_addr);
}
775
776
/* enable board interrupts, preserving the sticky bits of the host
   control register (read-modify-write) */
static void
fore200e_sba_irq_enable(struct fore200e* fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}
783
784
/* return non-zero if the board has an interrupt pending for us */
static int
fore200e_sba_irq_check(struct fore200e* fore200e)
{
    return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}
790
791
/* acknowledge a pending interrupt, preserving the sticky bits of the
   host control register (read-modify-write) */
static void
fore200e_sba_irq_ack(struct fore200e* fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}
798
799
/* hard-reset the board: assert the reset bit, hold it for ~10 ms,
   then release it */
static void
fore200e_sba_reset(struct fore200e* fore200e)
{
    fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
    fore200e_spin(10);
    fore200e->bus->write(0, fore200e->regs.sba.hcr);
}
807
808
/* map the four SBus register/RAM resources of the board and set up the
   interrupt level and DVMA burst sizes.
   returns 0 on success, -EFAULT when the RAM mapping fails. */
static int __init
fore200e_sba_map(struct fore200e* fore200e)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
    unsigned int bursts;

    /* gain access to the SBA specific registers  */
    fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
    fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
    fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
    fore200e->virt_base    = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");

    /* NOTE(review): only the RAM mapping is checked for failure; the
       hcr/bsr/isr mappings are neither checked nor unmapped on this error
       path — confirm sbus_ioremap() failure semantics */
    if (fore200e->virt_base == NULL) {
	printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */

    /* get the supported DVMA burst sizes */
    bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);

    if (sbus_can_dma_64bit(sbus_dev))
	sbus_set_sbus64(sbus_dev, bursts);

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}
839
840
/* undo fore200e_sba_map(): release all four SBus mappings */
static void
fore200e_sba_unmap(struct fore200e* fore200e)
{
    sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
    sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
    sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
    sbus_iounmap(fore200e->virt_base,    SBA200E_RAM_LENGTH);
}
849
850
/* nothing to configure on the SBus variant; just advance the state machine */
static int __init
fore200e_sba_configure(struct fore200e* fore200e)
{
    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
857
858
/* locate the index-th SBA-200E adapter on the SBus (index is 0-based) and
   allocate/populate its driver descriptor.
   returns NULL if no such adapter exists or on allocation failure. */
static struct fore200e* __init
fore200e_sba_detect(const struct fore200e_bus* bus, int index)
{
    struct fore200e*          fore200e;
    struct sbus_bus* sbus_bus;
    struct sbus_dev* sbus_dev = NULL;

    unsigned int     count = 0;

    for_each_sbus (sbus_bus) {
	for_each_sbusdev (sbus_dev, sbus_bus) {
	    if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
		if (count >= index)
		    goto found;
		count++;
	    }
	}
    }
    return NULL;

  found:
    /* the adapter is expected to expose exactly 4 SBus resources
       (HCR, BSR, ISR, RAM — see fore200e_sba_map) */
    if (sbus_dev->num_registers != 4) {
	printk(FORE200E "this %s device has %d instead of 4 registers\n",
	       bus->model_name, sbus_dev->num_registers);
	return NULL;
    }

    fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (fore200e == NULL)
	return NULL;

    fore200e->bus     = bus;
    fore200e->bus_dev = sbus_dev;
    fore200e->irq     = sbus_dev->irqs[ 0 ];

    /* no real physical base on SBus; the device pointer is stored here
       only for reporting purposes */
    fore200e->phys_base = (unsigned long)sbus_dev;

    /* NOTE(review): with the 0-based 'index' used by the loop above, this
       names the first adapter "<model>--1" — confirm whether 'index'
       (not index - 1) was intended */
    sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);

    return fore200e;
}
900
901
/* fill the PROM data (MAC address, serial number, hardware revision)
   from the OpenPROM properties of the device.
   returns 0 on success, -EBUSY if a MAC property is missing. */
static int __init
fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
    int                       len;

    /* NOTE(review): the two 4-byte reads below overlap in mac_addr[4..5]
       (the second read into [2] rewrites bytes 4 and 5) — presumably the
       PROM layout makes this correct; verify against the PROM format */
    len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
    if (len < 0)
	return -EBUSY;

    len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
    if (len < 0)
	return -EBUSY;

    prom_getproperty(sbus_dev->prom_node, "serialnumber",
		     (char*)&prom->serial_number, sizeof(prom->serial_number));

    prom_getproperty(sbus_dev->prom_node, "promversion",
		     (char*)&prom->hw_revision, sizeof(prom->hw_revision));

    return 0;
}
924
925
/* /proc helper: report the SBus location of the board; returns the number
   of characters written into 'page' */
static int
fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;

    return sprintf(page, "   SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
}
933 #endif /* CONFIG_ATM_FORE200E_SBA */
934
935
/* reap completed tx entries from the transmit queue: release the per-entry
   resources, then either pop or drop the skb depending on whether the vcc
   that submitted it is still the same live incarnation */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
    struct host_txq*        txq = &fore200e->host_txq;
    struct host_txq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    /* fast exit when nothing is in flight */
    if (fore200e->host_txq.txing == 0)
	return;

    for (;;) {

	entry = &txq->host_entry[ txq->tail ];

	/* entries complete in order; stop at the first one still pending */
	if ((*entry->status & STATUS_COMPLETE) == 0) {
	    break;
	}

	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
		entry, txq->tail, entry->vc_map, entry->skb);

	/* free copy of misaligned data */
	kfree(entry->data);

	/* remove DMA mapping */
	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
				 DMA_TO_DEVICE);

	vc_map = entry->vc_map;

	/* vcc closed since the time the entry was submitted for tx? */
	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
		    fore200e->atm_dev->number);

	    dev_kfree_skb_any(entry->skb);
	}
	else {
	    ASSERT(vc_map->vcc);

	    /* vcc closed then immediately re-opened? */
	    if (vc_map->incarn != entry->incarn) {

		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
		   if the same vcc is immediately re-opened, those pending PDUs must
		   not be popped after the completion of their emission, as they refer
		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
		   would be decremented by the size of the (unrelated) skb, possibly
		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
		   we thus bind the tx entry to the current incarnation of the vcc
		   when the entry is submitted for tx. When the tx later completes,
		   if the incarnation number of the tx entry does not match the one
		   of the vcc, then this implies that the vcc has been closed then re-opened.
		   we thus just drop the skb here. */

		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
			fore200e->atm_dev->number);

		dev_kfree_skb_any(entry->skb);
	    }
	    else {
		vcc = vc_map->vcc;
		ASSERT(vcc);

		/* notify tx completion */
		if (vcc->pop) {
		    vcc->pop(vcc, entry->skb);
		}
		else {
		    dev_kfree_skb_any(entry->skb);
		}
#if 1
		/* race fixed by the above incarnation mechanism, but... */
		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
		}
#endif
		/* check error condition */
		if (*entry->status & STATUS_ERROR)
		    atomic_inc(&vcc->stats->tx_err);
		else
		    atomic_inc(&vcc->stats->tx);
	    }
	}

	/* hand the entry back to the driver and advance the tail pointer */
	*entry->status = STATUS_FREE;

	fore200e->host_txq.txing--;

	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
    }
}
1031
1032
1033 #ifdef FORE200E_BSQ_DEBUG
/* debug-only consistency audit of a buffer supply queue's free list:
   checks that no free buffer is marked supplied, that each buffer carries
   the expected scheme/magnitude, that indices are in range, and that the
   list length matches freebuf_count.  'where' tags the call site in the
   log output.  always returns 0 (diagnostics only). */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
    struct buffer* buffer;
    int count = 0;

    buffer = bsq->freebuf;
    while (buffer) {

	if (buffer->supplied) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
		   where, scheme, magn, buffer->index);
	}

	if (buffer->magn != magn) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
		   where, scheme, magn, buffer->index, buffer->magn);
	}

	if (buffer->scheme != scheme) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
		   where, scheme, magn, buffer->index, buffer->scheme);
	}

	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
		   where, scheme, magn, buffer->index);
	}

	count++;
	buffer = buffer->next;
    }

    if (count != bsq->freebuf_count) {
	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
	       where, scheme, magn, count, bsq->freebuf_count);
    }
    return 0;
}
1072 #endif
1073
1074
/* replenish the adapter with rx buffers: for every scheme/magnitude queue,
   while at least RBD_BLK_SIZE free buffers are available, take that many
   from the free list, pack them into the next rbd block and hand the block
   to the firmware by writing its DMA address into the queue entry */
static void
fore200e_supply(struct fore200e* fore200e)
{
    int  scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(1, bsq, scheme, magn);
#endif
	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {

		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

		entry = &bsq->host_entry[ bsq->head ];

		for (i = 0; i < RBD_BLK_SIZE; i++) {

		    /* take the first buffer in the free buffer list */
		    buffer = bsq->freebuf;
		    if (!buffer) {
			/* should not happen if freebuf_count is accurate */
			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
			       scheme, magn, bsq->freebuf_count);
			return;
		    }
		    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
		    if (buffer->supplied)
			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
			       scheme, magn, buffer->index);
		    buffer->supplied = 1;
#endif
		    /* each rbd records the buffer's DMA address plus a handle
		       used to recover the buffer on rx completion */
		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
		}

		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

		/* decrease accordingly the number of free rx buffers */
		bsq->freebuf_count -= RBD_BLK_SIZE;

		*entry->status = STATUS_PENDING;
		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
	    }
	}
    }
}
1131
1132
/*
 * Reassemble a received PDU from its rx buffer segments and push it up
 * to the ATM layer.
 *
 * Returns 0 on success; -ENOMEM if no skb could be allocated or if
 * atm_charge() refuses the skb (socket rx quota exceeded) -- in both
 * error cases vcc->stats->rx_drop is incremented.  The rx buffers
 * referenced by @rpd are NOT recycled here; the caller does that via
 * fore200e_collect_rpd().
 */
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
    struct sk_buff* skb;
    struct buffer* buffer;
    struct fore200e_vcc* fore200e_vcc;
    int i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
    u32 cell_header = 0;
#endif

    ASSERT(vcc);

    fore200e_vcc = FORE200E_VCC(vcc);
    ASSERT(fore200e_vcc);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* for raw AAL0, rebuild the 4-byte ATM cell header from the rpd and
       prepend it to the payload handed to the application */
    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
	              (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
	              (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
	               rpd->atm_header.clp;
	pdu_len = 4;
    }
#endif

    /* compute total PDU length */
    for (i = 0; i < rpd->nseg; i++)
	pdu_len += rpd->rsd[ i ].length;

    skb = alloc_skb(pdu_len, GFP_ATOMIC);
    if (skb == NULL) {
	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    __net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* NOTE(review): an all-zero rebuilt header is not prepended here even
       though pdu_len was bumped by 4 above -- confirm this is intended */
    if (cell_header) {
	*((u32*)skb_put(skb, 4)) = cell_header;
    }
#endif

    /* reassemble segments */
    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	/* Make device DMA transfer visible to CPU. */
	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);

	memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);

	/* Now let the device get at it again. */
	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
    }

    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

    /* update per-VC rx PDU size statistics */
    if (pdu_len < fore200e_vcc->rx_min_pdu)
	fore200e_vcc->rx_min_pdu = pdu_len;
    if (pdu_len > fore200e_vcc->rx_max_pdu)
	fore200e_vcc->rx_max_pdu = pdu_len;
    fore200e_vcc->rx_pdu++;

    /* push PDU */
    if (atm_charge(vcc, skb->truesize) == 0) {

	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
		vcc->itf, vcc->vpi, vcc->vci);

	dev_kfree_skb_any(skb);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    return 0;
}
1225
1226
1227 static void
1228 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1229 {
1230 struct host_bsq* bsq;
1231 struct buffer* buffer;
1232 int i;
1233
1234 for (i = 0; i < rpd->nseg; i++) {
1235
1236 /* rebuild rx buffer address from rsd handle */
1237 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1238
1239 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1240
1241 #ifdef FORE200E_BSQ_DEBUG
1242 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1243
1244 if (buffer->supplied == 0)
1245 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1246 buffer->scheme, buffer->magn, buffer->index);
1247 buffer->supplied = 0;
1248 #endif
1249
1250 /* re-insert the buffer into the free buffer list */
1251 buffer->next = bsq->freebuf;
1252 bsq->freebuf = buffer;
1253
1254 /* then increment the number of free rx buffers */
1255 bsq->freebuf_count++;
1256 }
1257 }
1258
1259
/*
 * Drain the receive completion queue.
 *
 * For each completed entry: look up the VC the PDU arrived on, hand the
 * PDU upstream unless the VC is gone/not ready or the cp flagged an
 * error, recycle the rx buffers, then ack the entry back to the cp by
 * rewriting its rpd address, and finally try to re-supply buffer blocks
 * to the device.  Called with fore200e->q_lock held (see fore200e_irq
 * and fore200e_rx_tasklet).
 */
static void
fore200e_rx_irq(struct fore200e* fore200e)
{
    struct host_rxq* rxq = &fore200e->host_rxq;
    struct host_rxq_entry* entry;
    struct atm_vcc* vcc;
    struct fore200e_vc_map* vc_map;

    for (;;) {

	entry = &rxq->host_entry[ rxq->head ];

	/* no more received PDUs */
	if ((*entry->status & STATUS_COMPLETE) == 0)
	    break;

	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);

	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    /* the VC was closed (or never opened) while the PDU was in flight */
	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
		    fore200e->atm_dev->number,
		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
	}
	else {
	    vcc = vc_map->vcc;
	    ASSERT(vcc);

	    if ((*entry->status & STATUS_ERROR) == 0) {

		fore200e_push_rpd(fore200e, vcc, entry->rpd);
	    }
	    else {
		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
			fore200e->atm_dev->number,
			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
		atomic_inc(&vcc->stats->rx_err);
	    }
	}

	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);

	/* the PDU data has been copied into an skb (or dropped), so the
	   underlying rx buffers can now be returned to the free lists */
	fore200e_collect_rpd(fore200e, entry->rpd);

	/* rewrite the rpd address to ack the received PDU */
	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
	*entry->status = STATUS_FREE;

	fore200e_supply(fore200e);
    }
}
1312
1313
1314 #ifndef FORE200E_USE_TASKLET
/*
 * Non-tasklet interrupt bottom half: drain the rx then the tx
 * completion queues.  The queue lock is taken and released separately
 * for each pass rather than held across both.
 */
static void
fore200e_irq(struct fore200e* fore200e)
{
    unsigned long flags;

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_rx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_tx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);
}
1328 #endif
1329
1330
/*
 * Main interrupt handler (the IRQ line may be shared).
 *
 * First checks whether this device actually raised the interrupt and
 * bails out with IRQ_NONE otherwise; then either schedules the tx/rx
 * tasklets or processes the completion queues inline, and finally acks
 * the interrupt on the board.
 */
static irqreturn_t
fore200e_interrupt(int irq, void* dev, struct pt_regs* regs)
{
    struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);

    if (fore200e->bus->irq_check(fore200e) == 0) {

	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
	return IRQ_NONE;
    }
    DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);

#ifdef FORE200E_USE_TASKLET
    /* defer the actual queue processing to tasklet context */
    tasklet_schedule(&fore200e->tx_tasklet);
    tasklet_schedule(&fore200e->rx_tasklet);
#else
    fore200e_irq(fore200e);
#endif

    fore200e->bus->irq_ack(fore200e);
    return IRQ_HANDLED;
}
1353
1354
1355 #ifdef FORE200E_USE_TASKLET
1356 static void
1357 fore200e_tx_tasklet(unsigned long data)
1358 {
1359 struct fore200e* fore200e = (struct fore200e*) data;
1360 unsigned long flags;
1361
1362 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1363
1364 spin_lock_irqsave(&fore200e->q_lock, flags);
1365 fore200e_tx_irq(fore200e);
1366 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1367 }
1368
1369
1370 static void
1371 fore200e_rx_tasklet(unsigned long data)
1372 {
1373 struct fore200e* fore200e = (struct fore200e*) data;
1374 unsigned long flags;
1375
1376 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1377
1378 spin_lock_irqsave(&fore200e->q_lock, flags);
1379 fore200e_rx_irq((struct fore200e*) data);
1380 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1381 }
1382 #endif
1383
1384
1385 static int
1386 fore200e_select_scheme(struct atm_vcc* vcc)
1387 {
1388 /* fairly balance the VCs over (identical) buffer schemes */
1389 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1390
1391 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1392 vcc->itf, vcc->vpi, vcc->vci, scheme);
1393
1394 return scheme;
1395 }
1396
1397
/*
 * Issue an ACTIVATE_VCIN or DEACTIVATE_VCIN command to the cp for the
 * incoming side of @vcc, then busy-wait for completion.
 *
 * @activate: non-zero to activate the VC, zero to deactivate it.
 * @mtu: only meaningful on activation (and, per the comment below, only
 *       used by the cp for AAL0).
 * Returns 0 on success, -EIO if the cp did not complete the command.
 */
static int
fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct activate_opcode activ_opcode;
    struct deactivate_opcode deactiv_opcode;
    struct vpvc vpvc;
    int ok;
    enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    if (activate) {
	/* bind the incoming VC to one of the two buffer schemes */
	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);

	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
	activ_opcode.aal = aal;
	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
	activ_opcode.pad = 0;
    }
    else {
	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
	deactiv_opcode.pad = 0;
    }

    vpvc.vci = vcc->vci;
    vpvc.vpi = vcc->vpi;

    *entry->status = STATUS_PENDING;

    if (activate) {

#ifdef FORE200E_52BYTE_AAL0_SDU
	mtu = 48;
#endif
	/* the MTU is not used by the cp, except in the case of AAL0 */
	fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
	fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
	/* opcode is written last, matching the ordering used by the other
	   command emitters in this driver (see fore200e_getstats) */
	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
    }
    else {
	fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
    }

    /* poll for command completion */
    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to %s VC %d.%d.%d\n",
	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
	return -EIO;
    }

    DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
	    activate ? "open" : "clos");

    return 0;
}
1459
1460
1461 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1462
1463 static void
1464 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1465 {
1466 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1467
1468 /* compute the data cells to idle cells ratio from the tx PCR */
1469 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1470 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1471 }
1472 else {
1473 /* disable rate control */
1474 rate->data_cells = rate->idle_cells = 0;
1475 }
1476 }
1477
1478
1479 static int
1480 fore200e_open(struct atm_vcc *vcc)
1481 {
1482 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1483 struct fore200e_vcc* fore200e_vcc;
1484 struct fore200e_vc_map* vc_map;
1485 unsigned long flags;
1486 int vci = vcc->vci;
1487 short vpi = vcc->vpi;
1488
1489 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1490 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1491
1492 spin_lock_irqsave(&fore200e->q_lock, flags);
1493
1494 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1495 if (vc_map->vcc) {
1496
1497 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1498
1499 printk(FORE200E "VC %d.%d.%d already in use\n",
1500 fore200e->atm_dev->number, vpi, vci);
1501
1502 return -EINVAL;
1503 }
1504
1505 vc_map->vcc = vcc;
1506
1507 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1508
1509 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1510 if (fore200e_vcc == NULL) {
1511 vc_map->vcc = NULL;
1512 return -ENOMEM;
1513 }
1514
1515 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1516 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1517 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1518 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1519 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1520 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1521 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1522
1523 /* pseudo-CBR bandwidth requested? */
1524 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1525
1526 down(&fore200e->rate_sf);
1527 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1528 up(&fore200e->rate_sf);
1529
1530 fore200e_kfree(fore200e_vcc);
1531 vc_map->vcc = NULL;
1532 return -EAGAIN;
1533 }
1534
1535 /* reserve bandwidth */
1536 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1537 up(&fore200e->rate_sf);
1538 }
1539
1540 vcc->itf = vcc->dev->number;
1541
1542 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1543 set_bit(ATM_VF_ADDR, &vcc->flags);
1544
1545 vcc->dev_data = fore200e_vcc;
1546
1547 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1548
1549 vc_map->vcc = NULL;
1550
1551 clear_bit(ATM_VF_ADDR, &vcc->flags);
1552 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1553
1554 vcc->dev_data = NULL;
1555
1556 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1557
1558 fore200e_kfree(fore200e_vcc);
1559 return -EINVAL;
1560 }
1561
1562 /* compute rate control parameters */
1563 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1564
1565 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1566 set_bit(ATM_VF_HASQOS, &vcc->flags);
1567
1568 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1569 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1570 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1571 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1572 }
1573
1574 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1575 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1576 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1577
1578 /* new incarnation of the vcc */
1579 vc_map->incarn = ++fore200e->incarn_count;
1580
1581 /* VC unusable before this flag is set */
1582 set_bit(ATM_VF_READY, &vcc->flags);
1583
1584 return 0;
1585 }
1586
1587
/*
 * Close a VC: mark it not-ready, deactivate the incoming side on the cp,
 * detach it from the vc map and the atm_vcc under the queue lock, give
 * back any reserved pseudo-CBR bandwidth, and free the per-VC state.
 */
static void
fore200e_close(struct atm_vcc* vcc)
{
    struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
    struct fore200e_vcc* fore200e_vcc;
    struct fore200e_vc_map* vc_map;
    unsigned long flags;

    ASSERT(vcc);
    ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
    ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));

    DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));

    /* stop the rx path from pushing further PDUs on this VC */
    clear_bit(ATM_VF_READY, &vcc->flags);

    fore200e_activate_vcin(fore200e, 0, vcc, 0);

    spin_lock_irqsave(&fore200e->q_lock, flags);

    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);

    /* the vc is no longer considered as "in use" by fore200e_open() */
    vc_map->vcc = NULL;

    vcc->itf = vcc->vci = vcc->vpi = 0;

    fore200e_vcc = FORE200E_VCC(vcc);
    vcc->dev_data = NULL;

    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    /* release reserved bandwidth, if any */
    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {

	down(&fore200e->rate_sf);
	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
	up(&fore200e->rate_sf);

	clear_bit(ATM_VF_HASQOS, &vcc->flags);
    }

    clear_bit(ATM_VF_ADDR, &vcc->flags);
    clear_bit(ATM_VF_PARTIAL,&vcc->flags);

    ASSERT(fore200e_vcc);
    fore200e_kfree(fore200e_vcc);
}
1636
1637
1638 static int
1639 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1640 {
1641 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1642 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1643 struct fore200e_vc_map* vc_map;
1644 struct host_txq* txq = &fore200e->host_txq;
1645 struct host_txq_entry* entry;
1646 struct tpd* tpd;
1647 struct tpd_haddr tpd_haddr;
1648 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1649 int tx_copy = 0;
1650 int tx_len = skb->len;
1651 u32* cell_header = NULL;
1652 unsigned char* skb_data;
1653 int skb_len;
1654 unsigned char* data;
1655 unsigned long flags;
1656
1657 ASSERT(vcc);
1658 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1659 ASSERT(fore200e);
1660 ASSERT(fore200e_vcc);
1661
1662 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1663 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1664 dev_kfree_skb_any(skb);
1665 return -EINVAL;
1666 }
1667
1668 #ifdef FORE200E_52BYTE_AAL0_SDU
1669 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1670 cell_header = (u32*) skb->data;
1671 skb_data = skb->data + 4; /* skip 4-byte cell header */
1672 skb_len = tx_len = skb->len - 4;
1673
1674 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1675 }
1676 else
1677 #endif
1678 {
1679 skb_data = skb->data;
1680 skb_len = skb->len;
1681 }
1682
1683 if (((unsigned long)skb_data) & 0x3) {
1684
1685 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1686 tx_copy = 1;
1687 tx_len = skb_len;
1688 }
1689
1690 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1691
1692 /* this simply NUKES the PCA board */
1693 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1694 tx_copy = 1;
1695 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1696 }
1697
1698 if (tx_copy) {
1699 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1700 if (data == NULL) {
1701 if (vcc->pop) {
1702 vcc->pop(vcc, skb);
1703 }
1704 else {
1705 dev_kfree_skb_any(skb);
1706 }
1707 return -ENOMEM;
1708 }
1709
1710 memcpy(data, skb_data, skb_len);
1711 if (skb_len < tx_len)
1712 memset(data + skb_len, 0x00, tx_len - skb_len);
1713 }
1714 else {
1715 data = skb_data;
1716 }
1717
1718 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1719 ASSERT(vc_map->vcc == vcc);
1720
1721 retry_here:
1722
1723 spin_lock_irqsave(&fore200e->q_lock, flags);
1724
1725 entry = &txq->host_entry[ txq->head ];
1726
1727 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1728
1729 /* try to free completed tx queue entries */
1730 fore200e_tx_irq(fore200e);
1731
1732 if (*entry->status != STATUS_FREE) {
1733
1734 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1735
1736 /* retry once again? */
1737 if (--retry > 0) {
1738 udelay(50);
1739 goto retry_here;
1740 }
1741
1742 atomic_inc(&vcc->stats->tx_err);
1743
1744 fore200e->tx_sat++;
1745 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1746 fore200e->name, fore200e->cp_queues->heartbeat);
1747 if (vcc->pop) {
1748 vcc->pop(vcc, skb);
1749 }
1750 else {
1751 dev_kfree_skb_any(skb);
1752 }
1753
1754 if (tx_copy)
1755 kfree(data);
1756
1757 return -ENOBUFS;
1758 }
1759 }
1760
1761 entry->incarn = vc_map->incarn;
1762 entry->vc_map = vc_map;
1763 entry->skb = skb;
1764 entry->data = tx_copy ? data : NULL;
1765
1766 tpd = entry->tpd;
1767 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1768 tpd->tsd[ 0 ].length = tx_len;
1769
1770 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1771 txq->txing++;
1772
1773 /* The dma_map call above implies a dma_sync so the device can use it,
1774 * thus no explicit dma_sync call is necessary here.
1775 */
1776
1777 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1778 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1779 tpd->tsd[0].length, skb_len);
1780
1781 if (skb_len < fore200e_vcc->tx_min_pdu)
1782 fore200e_vcc->tx_min_pdu = skb_len;
1783 if (skb_len > fore200e_vcc->tx_max_pdu)
1784 fore200e_vcc->tx_max_pdu = skb_len;
1785 fore200e_vcc->tx_pdu++;
1786
1787 /* set tx rate control information */
1788 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1789 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1790
1791 if (cell_header) {
1792 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1793 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1794 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1795 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1796 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1797 }
1798 else {
1799 /* set the ATM header, common to all cells conveying the PDU */
1800 tpd->atm_header.clp = 0;
1801 tpd->atm_header.plt = 0;
1802 tpd->atm_header.vci = vcc->vci;
1803 tpd->atm_header.vpi = vcc->vpi;
1804 tpd->atm_header.gfc = 0;
1805 }
1806
1807 tpd->spec.length = tx_len;
1808 tpd->spec.nseg = 1;
1809 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1810 tpd->spec.intr = 1;
1811
1812 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1813 tpd_haddr.pad = 0;
1814 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1815
1816 *entry->status = STATUS_PENDING;
1817 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1818
1819 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1820
1821 return 0;
1822 }
1823
1824
/*
 * Fetch the on-board statistics into fore200e->stats.
 *
 * Lazily allocates the DMA-able stats buffer on first use (it is kept
 * for subsequent calls), maps it for device writes, posts a GET_STATS
 * command to the cp, polls for completion, and unmaps the buffer.
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO if the cp
 * did not complete the command.
 */
static int
fore200e_getstats(struct fore200e* fore200e)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct stats_opcode opcode;
    int ok;
    u32 stats_dma_addr;

    if (fore200e->stats == NULL) {
	fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
	if (fore200e->stats == NULL)
	    return -ENOMEM;
    }

    stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
					    sizeof(struct stats), DMA_FROM_DEVICE);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_STATS;
    opcode.pad = 0;

    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);

    *entry->status = STATUS_PENDING;

    /* the opcode write triggers the cp to process the command */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}
1867
1868
1869 static int
1870 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1871 {
1872 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1873
1874 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1875 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1876
1877 return -EINVAL;
1878 }
1879
1880
1881 static int
1882 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1883 {
1884 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1885
1886 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1887 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1888
1889 return -EINVAL;
1890 }
1891
1892
1893 #if 0 /* currently unused */
1894 static int
1895 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1896 {
1897 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1898 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1899 struct oc3_opcode opcode;
1900 int ok;
1901 u32 oc3_regs_dma_addr;
1902
1903 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1904
1905 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1906
1907 opcode.opcode = OPCODE_GET_OC3;
1908 opcode.reg = 0;
1909 opcode.value = 0;
1910 opcode.mask = 0;
1911
1912 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1913
1914 *entry->status = STATUS_PENDING;
1915
1916 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1917
1918 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1919
1920 *entry->status = STATUS_FREE;
1921
1922 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1923
1924 if (ok == 0) {
1925 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1926 return -EIO;
1927 }
1928
1929 return 0;
1930 }
1931 #endif
1932
1933
/*
 * Write an OC-3 (SUNI) register on the board via a SET_OC3 cp command:
 * the bits selected by @mask in register @reg are set to @value.
 * Returns 0 on success, -EIO if the cp did not complete the command.
 */
static int
fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode opcode;
    int ok;

    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_SET_OC3;
    opcode.reg = reg;
    opcode.value = value;
    opcode.mask = mask;

    /* no result buffer is needed for a register write */
    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    /* the opcode write triggers the cp to process the command */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
	return -EIO;
    }

    return 0;
}
1968
1969
1970 static int
1971 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1972 {
1973 u32 mct_value, mct_mask;
1974 int error;
1975
1976 if (!capable(CAP_NET_ADMIN))
1977 return -EPERM;
1978
1979 switch (loop_mode) {
1980
1981 case ATM_LM_NONE:
1982 mct_value = 0;
1983 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1984 break;
1985
1986 case ATM_LM_LOC_PHY:
1987 mct_value = mct_mask = SUNI_MCT_DLE;
1988 break;
1989
1990 case ATM_LM_RMT_PHY:
1991 mct_value = mct_mask = SUNI_MCT_LLE;
1992 break;
1993
1994 default:
1995 return -EINVAL;
1996 }
1997
1998 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1999 if (error == 0)
2000 fore200e->loop_mode = loop_mode;
2001
2002 return error;
2003 }
2004
2005
/* Convert a 32 bit on-board counter value to host byte order:
   a byte swap on little-endian hosts, a no-op otherwise. */
static inline unsigned int
fore200e_swap(unsigned int in)
{
#ifndef __LITTLE_ENDIAN
    return in;
#else
    return swab32(in);
#endif
}
2015
2016
2017 static int
2018 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2019 {
2020 struct sonet_stats tmp;
2021
2022 if (fore200e_getstats(fore200e) < 0)
2023 return -EIO;
2024
2025 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2026 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2027 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2028 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2029 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2030 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2031 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
2032 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2033 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2034 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2035 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2036 fore200e_swap(fore200e->stats->aal34.cells_received) +
2037 fore200e_swap(fore200e->stats->aal5.cells_received);
2038
2039 if (arg)
2040 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
2041
2042 return 0;
2043 }
2044
2045
/*
 * Device ioctl handler: SONET statistics/diagnostics queries and
 * loopback get/set/query.  Unknown commands yield -ENOSYS.
 */
static int
fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
{
    struct fore200e* fore200e = FORE200E_DEV(dev);

    DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);

    switch (cmd) {

    case SONET_GETSTAT:
	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);

    case SONET_GETDIAG:
	/* no diagnostics are tracked: always report 0 */
	return put_user(0, (int __user *)arg) ? -EFAULT : 0;

    case ATM_SETLOOP:
	return fore200e_setloop(fore200e, (int)(unsigned long)arg);

    case ATM_GETLOOP:
	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;

    case ATM_QUERYLOOP:
	/* both local and remote PHY loopback are supported */
	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
    }

    return -ENOSYS; /* not implemented */
}
2073
2074
2075 static int
2076 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2077 {
2078 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2079 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2080
2081 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2082 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
2083 return -EINVAL;
2084 }
2085
2086 DPRINTK(2, "change_qos %d.%d.%d, "
2087 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2088 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2089 "available_cell_rate = %u",
2090 vcc->itf, vcc->vpi, vcc->vci,
2091 fore200e_traffic_class[ qos->txtp.traffic_class ],
2092 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2093 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2094 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2095 flags, fore200e->available_cell_rate);
2096
2097 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2098
2099 down(&fore200e->rate_sf);
2100 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2101 up(&fore200e->rate_sf);
2102 return -EAGAIN;
2103 }
2104
2105 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2106 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2107
2108 up(&fore200e->rate_sf);
2109
2110 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2111
2112 /* update rate control parameters */
2113 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2114
2115 set_bit(ATM_VF_HASQOS, &vcc->flags);
2116
2117 return 0;
2118 }
2119
2120 return -EINVAL;
2121 }
2122
2123
/*
 * Reserve the device's (shared) IRQ line and, in tasklet mode,
 * initialize the tx/rx tasklets.  Advances the device state to
 * FORE200E_STATE_IRQ.  Returns 0 on success, -EBUSY if the IRQ could
 * not be obtained.
 */
static int __devinit
fore200e_irq_request(struct fore200e* fore200e)
{
    if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {

	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
	return -EBUSY;
    }

    printk(FORE200E "IRQ %s reserved for device %s\n",
	   fore200e_irq_itoa(fore200e->irq), fore200e->name);

#ifdef FORE200E_USE_TASKLET
    tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
    tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
#endif

    fore200e->state = FORE200E_STATE_IRQ;
    return 0;
}
2145
2146
2147 static int __devinit
2148 fore200e_get_esi(struct fore200e* fore200e)
2149 {
2150 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2151 int ok, i;
2152
2153 if (!prom)
2154 return -ENOMEM;
2155
2156 ok = fore200e->bus->prom_read(fore200e, prom);
2157 if (ok < 0) {
2158 fore200e_kfree(prom);
2159 return -EBUSY;
2160 }
2161
2162 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2163 fore200e->name,
2164 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2165 prom->serial_number & 0xFFFF,
2166 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2167 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2168
2169 for (i = 0; i < ESI_LEN; i++) {
2170 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2171 }
2172
2173 fore200e_kfree(prom);
2174
2175 return 0;
2176 }
2177
2178
static int __devinit
fore200e_alloc_rx_buf(struct fore200e* fore200e)
{
    /* Allocate the host-side receive buffers for every (scheme, magnitude)
       combination and thread them onto the corresponding free list. */
    int scheme, magn, nbr, size, i;

    struct host_bsq* bsq;
    struct buffer* buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* per-pool buffer count and size are fixed tables */
	    nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
	    size = fore200e_rx_buf_size[ scheme ][ magn ];

	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);

	    /* allocate the array of receive buffers */
	    buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);

	    if (buffer == NULL)
		return -ENOMEM;

	    bsq->freebuf = NULL;

	    for (i = 0; i < nbr; i++) {

		buffer[ i ].scheme = scheme;
		buffer[ i ].magn = magn;
#ifdef FORE200E_BSQ_DEBUG
		buffer[ i ].index = i;
		buffer[ i ].supplied = 0;
#endif

		/* allocate the receive buffer body */
		if (fore200e_chunk_alloc(fore200e,
					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
					 DMA_FROM_DEVICE) < 0) {

		    /* unwind the bodies already allocated for THIS pool;
		       NOTE(review): pools completed in earlier iterations are
		       not released here — presumably the caller's shutdown
		       path handles them; confirm against fore200e_shutdown() */
		    while (i > 0)
			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
		    fore200e_kfree(buffer);

		    return -ENOMEM;
		}

		/* insert the buffer into the free buffer list */
		buffer[ i ].next = bsq->freebuf;
		bsq->freebuf = &buffer[ i ];
	    }
	    /* all the buffers are free, initially */
	    bsq->freebuf_count = nbr;

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(3, bsq, scheme, magn);
#endif
	}
    }

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_ALLOC_BUF;
    return 0;
}
2242
2243
static int __devinit
fore200e_init_bs_queue(struct fore200e* fore200e)
{
    /* Set up the buffer supply queues: for each (scheme, magnitude) pair,
       allocate DMA-able status words and receive buffer descriptor blocks,
       then link each host entry to its cp (i960 coprocessor) resident
       counterpart. */
    int scheme, magn, i;

    struct host_bsq* bsq;
    struct cp_bsq_entry __iomem * cp_entry;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* allocate and align the array of status words */
	    if (fore200e->bus->dma_chunk_alloc(fore200e,
					       &bsq->status,
					       sizeof(enum status),
					       QUEUE_SIZE_BS,
					       fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	    }

	    /* allocate and align the array of receive buffer descriptors */
	    if (fore200e->bus->dma_chunk_alloc(fore200e,
					       &bsq->rbd_block,
					       sizeof(struct rbd_block),
					       QUEUE_SIZE_BS,
					       fore200e->bus->descr_alignment) < 0) {

		/* release the status array allocated just above */
		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
		return -ENOMEM;
	    }

	    /* get the base address of the cp resident buffer supply queue entries */
	    cp_entry = fore200e->virt_base +
		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

	    /* fill the host resident and cp resident buffer supply queue entries */
	    for (i = 0; i < QUEUE_SIZE_BS; i++) {

		bsq->host_entry[ i ].status =
		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
	        bsq->host_entry[ i ].rbd_block =
		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
		bsq->host_entry[ i ].rbd_block_dma =
		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		/* every slot starts out free */
		*bsq->host_entry[ i ].status = STATUS_FREE;

		/* tell the cp where to report completion status for slot i */
		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	    }
	}
    }

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}
2305
2306
static int __devinit
fore200e_init_rx_queue(struct fore200e* fore200e)
{
    /* Set up the receive queue: allocate DMA-able status words and receive
       PDU descriptors, then link each host entry to its cp resident
       counterpart and hand the cp the DMA addresses it needs. */
    struct host_rxq* rxq = &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_RX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->rpd,
				       sizeof(struct rpd),
				       QUEUE_SIZE_RX,
				       fore200e->bus->descr_alignment) < 0) {

	/* release the status array allocated just above */
	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

	rxq->host_entry[ i ].status =
	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
	rxq->host_entry[ i ].rpd =
	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
	rxq->host_entry[ i ].rpd_dma =
	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	/* every slot starts out free */
	*rxq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
			     &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}
2365
2366
static int __devinit
fore200e_init_tx_queue(struct fore200e* fore200e)
{
    /* Set up the transmit queue: allocate DMA-able status words and transmit
       PDU descriptors, then link each host entry to its cp resident
       counterpart. Unlike the rx queue, the tpd DMA addresses are NOT
       pre-written to the cp entries — see the comment in the loop. */
    struct host_txq* txq = &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_TX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->tpd,
				       sizeof(struct tpd),
				       QUEUE_SIZE_TX,
				       fore200e->bus->descr_alignment) < 0) {

	/* release the status array allocated just above */
	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i=0; i < QUEUE_SIZE_TX; i++) {

	txq->host_entry[ i ].status =
	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
	txq->host_entry[ i ].tpd =
	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
	txq->host_entry[ i ].tpd_dma =
	                     FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	/* every slot starts out free */
	*txq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

        /* although there is a one-to-one mapping of tx queue entries and tpds,
	   we do not write here the DMA (physical) base address of each tpd into
	   the related cp resident entry, because the cp relies on this write
	   operation to detect that a new pdu has been submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}
2428
2429
static int __devinit
fore200e_init_cmd_queue(struct fore200e* fore200e)
{
    /* Set up the command queue: allocate DMA-able status words and link each
       host entry to its cp resident counterpart. Commands carry no separate
       descriptor array — only a status word per slot. */
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &cmdq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_CMD,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i=0; i < QUEUE_SIZE_CMD; i++) {

	cmdq->host_entry[ i ].status =
	                      FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	/* every slot starts out free */
	*cmdq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);
    }

    /* set the head entry of the queue */
    cmdq->head = 0;

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_INIT_CMDQ;
    return 0;
}
2470
2471
2472 static void __init
2473 fore200e_param_bs_queue(struct fore200e* fore200e,
2474 enum buffer_scheme scheme, enum buffer_magn magn,
2475 int queue_length, int pool_size, int supply_blksize)
2476 {
2477 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2478
2479 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2480 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2481 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2482 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2483 }
2484
2485
static int __devinit
fore200e_initialize(struct fore200e* fore200e)
{
    /* Perform the cp initialization handshake: program the queue sizes and
       buffer supply parameters into the cp resident init block, issue the
       INITIALIZE opcode, and poll until the cp reports completion. */
    struct cp_queues __iomem * cpq;
    int ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    /* rate_sf serializes updates of available_cell_rate (CBR reservations) */
    init_MUTEX(&fore200e->rate_sf);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    if (fore200e->bus->irq_enable)
	fore200e->bus->irq_enable(fore200e);
    
    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);

    /* program one buffer supply spec per (scheme, magnitude) pair */
    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
	    fore200e_param_bs_queue(fore200e, scheme, magn,
				    QUEUE_SIZE_BS, 
				    fore200e_rx_buf_nbr[ scheme ][ magn ],
				    RBD_BLK_SIZE);

    /* issue the initialize command; status must be set to PENDING before
       the opcode so the subsequent poll observes the cp's transition */
    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
    if (ok == 0) {
	printk(FORE200E "device %s initialization failed\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}
2536
2537
2538 static void __devinit
2539 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2540 {
2541 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2542
2543 #if 0
2544 printk("%c", c);
2545 #endif
2546 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2547 }
2548
2549
2550 static int __devinit
2551 fore200e_monitor_getc(struct fore200e* fore200e)
2552 {
2553 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2554 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2555 int c;
2556
2557 while (time_before(jiffies, timeout)) {
2558
2559 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2560
2561 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2562
2563 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2564 #if 0
2565 printk("%c", c & 0xFF);
2566 #endif
2567 return c & 0xFF;
2568 }
2569 }
2570
2571 return -1;
2572 }
2573
2574
2575 static void __devinit
2576 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2577 {
2578 while (*str) {
2579
2580 /* the i960 monitor doesn't accept any new character if it has something to say */
2581 while (fore200e_monitor_getc(fore200e) >= 0);
2582
2583 fore200e_monitor_putc(fore200e, *str++);
2584 }
2585
2586 while (fore200e_monitor_getc(fore200e) >= 0);
2587 }
2588
2589
static int __devinit
fore200e_start_fw(struct fore200e* fore200e)
{
    /* Kick the previously-loaded firmware: send a "go <start_offset>" command
       to the i960 monitor over the soft UART, then poll the boot status word
       until the cp reports it is running. */
    int               ok;
    char              cmd[ 48 ];
    struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;

    DPRINTK(2, "device %s firmware being started\n", fore200e->name);

#if defined(__sparc_v9__)
    /* reported to be required by SBA cards on some sparc64 hosts */
    fore200e_spin(100);
#endif

    /* start offset is stored little-endian in the firmware image */
    sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));

    fore200e_monitor_puts(fore200e, cmd);

    ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
    if (ok == 0) {
	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s firmware started\n", fore200e->name);

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_START_FW;
    return 0;
}
2619
2620
static int __devinit
fore200e_load_fw(struct fore200e* fore200e)
{
    /* Copy the bus-specific firmware image, word by word, into the board's
       memory at the load offset recorded in the image header. The image is
       stored little-endian and converted on the fly. */
    u32* fw_data = (u32*) fore200e->bus->fw_data;
    u32  fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);

    struct fw_header* fw_header = (struct fw_header*) fw_data;

    u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);

    DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
	    fore200e->name, load_addr, fw_size);

    /* sanity-check the image before writing anything to the board */
    if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
	return -ENODEV;
    }

    for (; fw_size--; fw_data++, load_addr++)
	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);

    /* record progress for the teardown path */
    fore200e->state = FORE200E_STATE_LOAD_FW;
    return 0;
}
2645
2646
2647 static int __devinit
2648 fore200e_register(struct fore200e* fore200e)
2649 {
2650 struct atm_dev* atm_dev;
2651
2652 DPRINTK(2, "device %s being registered\n", fore200e->name);
2653
2654 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2655 NULL);
2656 if (atm_dev == NULL) {
2657 printk(FORE200E "unable to register device %s\n", fore200e->name);
2658 return -ENODEV;
2659 }
2660
2661 atm_dev->dev_data = fore200e;
2662 fore200e->atm_dev = atm_dev;
2663
2664 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2665 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2666
2667 fore200e->available_cell_rate = ATM_OC3_PCR;
2668
2669 fore200e->state = FORE200E_STATE_REGISTER;
2670 return 0;
2671 }
2672
2673
static int __devinit
fore200e_init(struct fore200e* fore200e)
{
    /* Full bring-up sequence for one board. The order is significant:
       register with the ATM stack, configure and map the bus resources,
       reset the board, load and start the firmware, run the cp init
       handshake, build all queues and buffer pools, read the ESI, hook the
       IRQ, and finally hand the initial buffer supply to the cp. Each step
       advances fore200e->state so the caller's shutdown path can unwind
       exactly as far as initialization got. */
    if (fore200e_register(fore200e) < 0)
	return -ENODEV;
    
    if (fore200e->bus->configure(fore200e) < 0)
	return -ENODEV;

    if (fore200e->bus->map(fore200e) < 0)
	return -ENODEV;

    if (fore200e_reset(fore200e, 1) < 0)
	return -ENODEV;

    if (fore200e_load_fw(fore200e) < 0)
	return -ENODEV;

    if (fore200e_start_fw(fore200e) < 0)
	return -ENODEV;

    if (fore200e_initialize(fore200e) < 0)
	return -ENODEV;

    if (fore200e_init_cmd_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_tx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_rx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_bs_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_alloc_rx_buf(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_get_esi(fore200e) < 0)
	return -EIO;

    if (fore200e_irq_request(fore200e) < 0)
	return -EBUSY;

    fore200e_supply(fore200e);

    /* all done, board initialization is now complete */
    fore200e->state = FORE200E_STATE_COMPLETE;
    return 0;
}
2725
2726
2727 static int __devinit
2728 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2729 {
2730 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2731 struct fore200e* fore200e;
2732 int err = 0;
2733 static int index = 0;
2734
2735 if (pci_enable_device(pci_dev)) {
2736 err = -EINVAL;
2737 goto out;
2738 }
2739
2740 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
2741 if (fore200e == NULL) {
2742 err = -ENOMEM;
2743 goto out_disable;
2744 }
2745
2746 fore200e->bus = bus;
2747 fore200e->bus_dev = pci_dev;
2748 fore200e->irq = pci_dev->irq;
2749 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2750
2751 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
2752
2753 pci_set_master(pci_dev);
2754
2755 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2756 fore200e->bus->model_name,
2757 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2758
2759 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2760
2761 err = fore200e_init(fore200e);
2762 if (err < 0) {
2763 fore200e_shutdown(fore200e);
2764 goto out_free;
2765 }
2766
2767 ++index;
2768 pci_set_drvdata(pci_dev, fore200e);
2769
2770 out:
2771 return err;
2772
2773 out_free:
2774 kfree(fore200e);
2775 out_disable:
2776 pci_disable_device(pci_dev);
2777 goto out;
2778 }
2779
2780
2781 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2782 {
2783 struct fore200e *fore200e;
2784
2785 fore200e = pci_get_drvdata(pci_dev);
2786
2787 fore200e_shutdown(fore200e);
2788 kfree(fore200e);
2789 pci_disable_device(pci_dev);
2790 }
2791
2792
#ifdef CONFIG_ATM_FORE200E_PCA
/* PCI device IDs served by this driver; driver_data carries a pointer to
   the PCA-200E entry of the fore200e_bus method table below */
static struct pci_device_id fore200e_pca_tbl[] = {
    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
      0, 0, (unsigned long) &fore200e_bus[0] },
    { 0, }
};

MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);

/* hot-pluggable probe/remove entry points for PCA-200E boards */
static struct pci_driver fore200e_pca_driver = {
    .name =     "fore_200e",
    .probe =    fore200e_pca_detect,
    .remove =   __devexit_p(fore200e_pca_remove_one),
    .id_table = fore200e_pca_tbl,
};
#endif
2809
2810
static int __init
fore200e_module_init(void)
{
    /* Module entry point. Non-PCI boards (SBA-200E) are probed directly via
       each bus's detect() hook and collected on fore200e_boards; PCI boards
       (PCA-200E) are handled by registering fore200e_pca_driver, whose probe
       callback does the per-board work. Returns 0 if either path yields a
       usable driver, -ENODEV otherwise. */
    const struct fore200e_bus* bus;
    struct       fore200e*     fore200e;
    int                        index;

    printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");

    /* for each configured bus interface */
    for (bus = fore200e_bus; bus->model_name; bus++) {

	/* detect all boards present on that bus */
	for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
	    
	    printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
		   fore200e->bus->model_name, 
		   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));

	    sprintf(fore200e->name, "%s-%d", bus->model_name, index);

	    if (fore200e_init(fore200e) < 0) {

		/* unwind the partially-initialized board and stop probing
		   this bus */
		fore200e_shutdown(fore200e);
		break;
	    }

	    list_add(&fore200e->entry, &fore200e_boards);
	}
    }

#ifdef CONFIG_ATM_FORE200E_PCA
    /* pci_register_driver returns 0 on success */
    if (!pci_register_driver(&fore200e_pca_driver))
	return 0;
#endif

    /* no PCI driver registered, but loading still succeeds if any non-PCI
       board was brought up above */
    if (!list_empty(&fore200e_boards))
	return 0;

    return -ENODEV;
}
2852
2853
static void __exit
fore200e_module_cleanup(void)
{
    /* Module exit point: unregister the PCI driver (which removes PCA-200E
       boards via fore200e_pca_remove_one), then shut down and free every
       non-PCI board collected on fore200e_boards by module_init. */
    struct fore200e *fore200e, *next;

#ifdef CONFIG_ATM_FORE200E_PCA
    pci_unregister_driver(&fore200e_pca_driver);
#endif

    list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
	fore200e_shutdown(fore200e);
	kfree(fore200e);
    }
    DPRINTK(1, "module being removed\n");
}
2869
2870
2871 static int
2872 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2873 {
2874 struct fore200e* fore200e = FORE200E_DEV(dev);
2875 struct fore200e_vcc* fore200e_vcc;
2876 struct atm_vcc* vcc;
2877 int i, len, left = *pos;
2878 unsigned long flags;
2879
2880 if (!left--) {
2881
2882 if (fore200e_getstats(fore200e) < 0)
2883 return -EIO;
2884
2885 len = sprintf(page,"\n"
2886 " device:\n"
2887 " internal name:\t\t%s\n", fore200e->name);
2888
2889 /* print bus-specific information */
2890 if (fore200e->bus->proc_read)
2891 len += fore200e->bus->proc_read(fore200e, page + len);
2892
2893 len += sprintf(page + len,
2894 " interrupt line:\t\t%s\n"
2895 " physical base address:\t0x%p\n"
2896 " virtual base address:\t0x%p\n"
2897 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2898 " board serial number:\t\t%d\n\n",
2899 fore200e_irq_itoa(fore200e->irq),
2900 (void*)fore200e->phys_base,
2901 fore200e->virt_base,
2902 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2903 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2904 fore200e->esi[4] * 256 + fore200e->esi[5]);
2905
2906 return len;
2907 }
2908
2909 if (!left--)
2910 return sprintf(page,
2911 " free small bufs, scheme 1:\t%d\n"
2912 " free large bufs, scheme 1:\t%d\n"
2913 " free small bufs, scheme 2:\t%d\n"
2914 " free large bufs, scheme 2:\t%d\n",
2915 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2916 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2917 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2918 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2919
2920 if (!left--) {
2921 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2922
2923 len = sprintf(page,"\n\n"
2924 " cell processor:\n"
2925 " heartbeat state:\t\t");
2926
2927 if (hb >> 16 != 0xDEAD)
2928 len += sprintf(page + len, "0x%08x\n", hb);
2929 else
2930 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2931
2932 return len;
2933 }
2934
2935 if (!left--) {
2936 static const char* media_name[] = {
2937 "unshielded twisted pair",
2938 "multimode optical fiber ST",
2939 "multimode optical fiber SC",
2940 "single-mode optical fiber ST",
2941 "single-mode optical fiber SC",
2942 "unknown"
2943 };
2944
2945 static const char* oc3_mode[] = {
2946 "normal operation",
2947 "diagnostic loopback",
2948 "line loopback",
2949 "unknown"
2950 };
2951
2952 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2953 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2954 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2955 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2956 u32 oc3_index;
2957
2958 if ((media_index < 0) || (media_index > 4))
2959 media_index = 5;
2960
2961 switch (fore200e->loop_mode) {
2962 case ATM_LM_NONE: oc3_index = 0;
2963 break;
2964 case ATM_LM_LOC_PHY: oc3_index = 1;
2965 break;
2966 case ATM_LM_RMT_PHY: oc3_index = 2;
2967 break;
2968 default: oc3_index = 3;
2969 }
2970
2971 return sprintf(page,
2972 " firmware release:\t\t%d.%d.%d\n"
2973 " monitor release:\t\t%d.%d\n"
2974 " media type:\t\t\t%s\n"
2975 " OC-3 revision:\t\t0x%x\n"
2976 " OC-3 mode:\t\t\t%s",
2977 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2978 mon960_release >> 16, mon960_release << 16 >> 16,
2979 media_name[ media_index ],
2980 oc3_revision,
2981 oc3_mode[ oc3_index ]);
2982 }
2983
2984 if (!left--) {
2985 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2986
2987 return sprintf(page,
2988 "\n\n"
2989 " monitor:\n"
2990 " version number:\t\t%d\n"
2991 " boot status word:\t\t0x%08x\n",
2992 fore200e->bus->read(&cp_monitor->mon_version),
2993 fore200e->bus->read(&cp_monitor->bstat));
2994 }
2995
2996 if (!left--)
2997 return sprintf(page,
2998 "\n"
2999 " device statistics:\n"
3000 " 4b5b:\n"
3001 " crc_header_errors:\t\t%10u\n"
3002 " framing_errors:\t\t%10u\n",
3003 fore200e_swap(fore200e->stats->phy.crc_header_errors),
3004 fore200e_swap(fore200e->stats->phy.framing_errors));
3005
3006 if (!left--)
3007 return sprintf(page, "\n"
3008 " OC-3:\n"
3009 " section_bip8_errors:\t%10u\n"
3010 " path_bip8_errors:\t\t%10u\n"
3011 " line_bip24_errors:\t\t%10u\n"
3012 " line_febe_errors:\t\t%10u\n"
3013 " path_febe_errors:\t\t%10u\n"
3014 " corr_hcs_errors:\t\t%10u\n"
3015 " ucorr_hcs_errors:\t\t%10u\n",
3016 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
3017 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
3018 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
3019 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
3020 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
3021 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
3022 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
3023
3024 if (!left--)
3025 return sprintf(page,"\n"
3026 " ATM:\t\t\t\t cells\n"
3027 " TX:\t\t\t%10u\n"
3028 " RX:\t\t\t%10u\n"
3029 " vpi out of range:\t\t%10u\n"
3030 " vpi no conn:\t\t%10u\n"
3031 " vci out of range:\t\t%10u\n"
3032 " vci no conn:\t\t%10u\n",
3033 fore200e_swap(fore200e->stats->atm.cells_transmitted),
3034 fore200e_swap(fore200e->stats->atm.cells_received),
3035 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
3036 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
3037 fore200e_swap(fore200e->stats->atm.vci_bad_range),
3038 fore200e_swap(fore200e->stats->atm.vci_no_conn));
3039
3040 if (!left--)
3041 return sprintf(page,"\n"
3042 " AAL0:\t\t\t cells\n"
3043 " TX:\t\t\t%10u\n"
3044 " RX:\t\t\t%10u\n"
3045 " dropped:\t\t\t%10u\n",
3046 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3047 fore200e_swap(fore200e->stats->aal0.cells_received),
3048 fore200e_swap(fore200e->stats->aal0.cells_dropped));
3049
3050 if (!left--)
3051 return sprintf(page,"\n"
3052 " AAL3/4:\n"
3053 " SAR sublayer:\t\t cells\n"
3054 " TX:\t\t\t%10u\n"
3055 " RX:\t\t\t%10u\n"
3056 " dropped:\t\t\t%10u\n"
3057 " CRC errors:\t\t%10u\n"
3058 " protocol errors:\t\t%10u\n\n"
3059 " CS sublayer:\t\t PDUs\n"
3060 " TX:\t\t\t%10u\n"
3061 " RX:\t\t\t%10u\n"
3062 " dropped:\t\t\t%10u\n"
3063 " protocol errors:\t\t%10u\n",
3064 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3065 fore200e_swap(fore200e->stats->aal34.cells_received),
3066 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3067 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3068 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3069 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3070 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3071 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3072 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
3073
3074 if (!left--)
3075 return sprintf(page,"\n"
3076 " AAL5:\n"
3077 " SAR sublayer:\t\t cells\n"
3078 " TX:\t\t\t%10u\n"
3079 " RX:\t\t\t%10u\n"
3080 " dropped:\t\t\t%10u\n"
3081 " congestions:\t\t%10u\n\n"
3082 " CS sublayer:\t\t PDUs\n"
3083 " TX:\t\t\t%10u\n"
3084 " RX:\t\t\t%10u\n"
3085 " dropped:\t\t\t%10u\n"
3086 " CRC errors:\t\t%10u\n"
3087 " protocol errors:\t\t%10u\n",
3088 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3089 fore200e_swap(fore200e->stats->aal5.cells_received),
3090 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3091 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3092 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3093 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3094 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3095 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3096 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
3097
3098 if (!left--)
3099 return sprintf(page,"\n"
3100 " AUX:\t\t allocation failures\n"
3101 " small b1:\t\t\t%10u\n"
3102 " large b1:\t\t\t%10u\n"
3103 " small b2:\t\t\t%10u\n"
3104 " large b2:\t\t\t%10u\n"
3105 " RX PDUs:\t\t\t%10u\n"
3106 " TX PDUs:\t\t\t%10lu\n",
3107 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3108 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3109 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3110 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3111 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3112 fore200e->tx_sat);
3113
3114 if (!left--)
3115 return sprintf(page,"\n"
3116 " receive carrier:\t\t\t%s\n",
3117 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3118
3119 if (!left--) {
3120 return sprintf(page,"\n"
3121 " VCCs:\n address VPI VCI AAL "
3122 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3123 }
3124
3125 for (i = 0; i < NBR_CONNECT; i++) {
3126
3127 vcc = fore200e->vc_map[i].vcc;
3128
3129 if (vcc == NULL)
3130 continue;
3131
3132 spin_lock_irqsave(&fore200e->q_lock, flags);
3133
3134 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3135
3136 fore200e_vcc = FORE200E_VCC(vcc);
3137 ASSERT(fore200e_vcc);
3138
3139 len = sprintf(page,
3140 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3141 (u32)(unsigned long)vcc,
3142 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3143 fore200e_vcc->tx_pdu,
3144 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3145 fore200e_vcc->tx_max_pdu,
3146 fore200e_vcc->rx_pdu,
3147 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3148 fore200e_vcc->rx_max_pdu);
3149
3150 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3151 return len;
3152 }
3153
3154 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3155 }
3156
3157 return 0;
3158 }
3159
/* module entry and exit points */
module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);
3162
3163
/* operations exported to the Linux ATM stack via atm_dev_register()
   (see <linux/atmdev.h> for the atmdev_ops contract) */
static const struct atmdev_ops fore200e_ops =
{
	.open       = fore200e_open,
	.close      = fore200e_close,
	.ioctl      = fore200e_ioctl,
	.getsockopt = fore200e_getsockopt,
	.setsockopt = fore200e_setsockopt,
	.send       = fore200e_send,
	.change_qos = fore200e_change_qos,
	.proc_read  = fore200e_proc_read,
	.owner      = THIS_MODULE
};
3176
3177
3178 #ifdef CONFIG_ATM_FORE200E_PCA
3179 extern const unsigned char _fore200e_pca_fw_data[];
3180 extern const unsigned int _fore200e_pca_fw_size;
3181 #endif
3182 #ifdef CONFIG_ATM_FORE200E_SBA
3183 extern const unsigned char _fore200e_sba_fw_data[];
3184 extern const unsigned int _fore200e_sba_fw_size;
3185 #endif
3186
/* Per-bus method tables, one entry per supported adapter family.
   NOTE(review): the initializers are positional and must match the field
   order of struct fore200e_bus in fore200e.h — the layout appears to be:
   model name, proc name, three alignment constraints (presumably for
   descriptors, buffers and status words — confirm against the header),
   firmware image data/size, register accessors, DMA helpers, chunk
   alloc/free, detect, configure, map, reset, PROM read, unmap, IRQ
   enable/check/ack, and proc output. */
static const struct fore200e_bus fore200e_bus[] = {
#ifdef CONFIG_ATM_FORE200E_PCA
    { "PCA-200E", "pca200e", 32, 4, 32, 
      _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
      fore200e_pca_read,
      fore200e_pca_write,
      fore200e_pca_dma_map,
      fore200e_pca_dma_unmap,
      fore200e_pca_dma_sync_for_cpu,
      fore200e_pca_dma_sync_for_device,
      fore200e_pca_dma_chunk_alloc,
      fore200e_pca_dma_chunk_free,
      NULL,                             /* no bus-level detect: probed via PCI */
      fore200e_pca_configure,
      fore200e_pca_map,
      fore200e_pca_reset,
      fore200e_pca_prom_read,
      fore200e_pca_unmap,
      NULL,                             /* no extra irq_enable step needed */
      fore200e_pca_irq_check,
      fore200e_pca_irq_ack,
      fore200e_pca_proc_read,
    },
#endif
#ifdef CONFIG_ATM_FORE200E_SBA
    { "SBA-200E", "sba200e", 32, 64, 32,
      _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
      fore200e_sba_read,
      fore200e_sba_write,
      fore200e_sba_dma_map,
      fore200e_sba_dma_unmap,
      fore200e_sba_dma_sync_for_cpu,
      fore200e_sba_dma_sync_for_device,
      fore200e_sba_dma_chunk_alloc,
      fore200e_sba_dma_chunk_free,
      fore200e_sba_detect, 
      fore200e_sba_configure,
      fore200e_sba_map,
      fore200e_sba_reset,
      fore200e_sba_prom_read,
      fore200e_sba_unmap,
      fore200e_sba_irq_enable,
      fore200e_sba_irq_check,
      fore200e_sba_irq_ack,
      fore200e_sba_proc_read,
    },
#endif
    {}                                  /* sentinel terminating the table */
};
3236
3237 #ifdef MODULE_LICENSE
3238 MODULE_LICENSE("GPL");
3239 #endif
/* (web-scrape artifact removed from code path: "This page took 0.094975 seconds and 6 git commands to generate.") */