/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
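
/* illustrative only (values not from the manual): with vcibits == 10,
 * vpi == 1 and vci == 32 pack into cid = (1 << 10) | 32 = 0x420; the
 * 0x1fff mask keeps the result inside the 13-bit connection id space */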

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

/* from page 2-20
 *
 * NOTE While the transmit connection is active, bits 23 through 0
 *      of this register must not be written by the host.  Byte
 *      enables should be used during normal operation when writing
 *      the most significant byte.
 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
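
/* worked example (mine, not the manual's): rate_to_atmf(10) shifts
 * 10 << 9 = 5120, halves three times to 640 (so exp = 3), and returns
 * NONZERO | (3 << 9) | (640 & 0x1ff) = 0x4000 | 0x600 | 0x80 = 0x4680,
 * which happens to be the value written to CS_RTATR in he_init_cs_block() */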

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
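
	/* illustrative numbers, assuming an OC-3 link (ATM_OC3_PCR ==
	 * 353207 cps): delta = 353207 / 32 = 11037, so row 0 steps from
	 * 353207 down to 353207 - 15 * 11037 = 187652; each later row
	 * halves the row above it, except row 15 which quarters row 14 */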

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
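
		/* e.g. (illustrative) rate_atmf = 0x21 decodes as exp = 1,
		 * man = 0x10, so rate_cps = 2 * (16 + 512) >> 9 = 2 cps,
		 * which the check below clamps to the 10 cps floor */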
		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
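
	/* bookkeeping, as implemented here: rbpl_table marks which pool
	 * slots are in use, rbpl_virt maps a slot index back to its
	 * struct he_buff, and the idx field stored in each rbp entry lets
	 * he_service_rbrq() recover the slot the hardware hands back */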
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
		  G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
		  G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
		  RBP_THRESH(CONFIG_RBPL_THRESH) |
		  RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
		  RBP_INT_ENB,
		  G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		  RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
		  G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
			  G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
			  G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			    sizeof(struct he_rbrq), he_dev->rbrq_base,
			    he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			    sizeof(struct he_rbp), he_dev->rbpl_base,
			    he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
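	/* i.e. 1 + 16 + 192 = 209 bus clocks, per the formula above */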
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	mdelay(16);	/* 16 ms; udelay() should not be used for multi-millisecond waits */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
				dev->esi[0],
				dev->esi[1],
				dev->esi[2],
				dev->esi[3],
				dev->esi[4],
				dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes      0 _______________________2047 bytes
	 *         |            |                |                   |   |
	 *         |  utility   |                |        rx0        |   |
	 *        5|____________|             255|___________________| u |
	 *        6|            |             256|                   | t |
	 *         |            |                |                   | i |
	 *         |    rx0     |            row |        tx         | l |
	 *         |            |                |                   | i |
	 *         |            |             767|___________________| t |
	 *      517|____________|             768|                   | y |
	 *  row 518|            |                |        rx1        |   |
	 *         |            |            1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
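
	/* arithmetic check (mine): a 622 card yields r0 = 256 * 40 / 4 =
	 * 2560 and tx = 512 * 40 / 4 = 5120, exactly the caps above; a
	 * 155 card yields r0 = 512 * 20 / 4 = 2560 and tx = 1018 * 20 / 4
	 * = 5090, just under the 5120 cap */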

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |       tx          |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
			  G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
			  G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
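
/* background note (not from the manual): the 8-byte AAL5 trailer ends
 * the reassembled PDU, with the 16-bit payload length stored big-endian
 * at offsets len-6/len-5 -- e.g. a 64-byte PDU has its length in
 * buf[58] and buf[59] */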

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))
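
/* i.e. when the hardware supplies it, the checksum sits in the last two
 * bytes of the CRC field, buf[len-2] and buf[len-1] */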

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		list_for_each_entry(heb, &he_vcc->buffers, entry)
			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
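		/* OR-ing the ring base with the masked increment wraps the
		 * head pointer in place; RBRQ_MASK() keeps only the
		 * queue-offset bits */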
1801 he_dev->rbrq_head = (struct he_rbrq *)
1802 ((unsigned long) he_dev->rbrq_base |
1803 RBRQ_MASK(++he_dev->rbrq_head));
1804
1805 }
1806 read_unlock(&vcc_sklist_lock);
1807
1808 if (updated) {
1809 if (updated > he_dev->rbrq_peak)
1810 he_dev->rbrq_peak = updated;
1811
1812 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1813 G0_RBRQ_H + (group * 16));
1814 }
1815
1816 return pdus_assembled;
1817 }
1818
1819 static void
1820 he_service_tbrq(struct he_dev *he_dev, int group)
1821 {
1822 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1823 ((unsigned long)he_dev->tbrq_base |
1824 he_dev->hsp->group[group].tbrq_tail);
1825 struct he_tpd *tpd;
1826 int slot, updated = 0;
1827 struct he_tpd *__tpd;
1828
1829 /* 2.1.6 transmit buffer return queue */
1830
1831 while (he_dev->tbrq_head != tbrq_tail) {
1832 ++updated;
1833
1834 HPRINTK("tbrq%d 0x%x%s%s\n",
1835 group,
1836 TBRQ_TPD(he_dev->tbrq_head),
1837 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1838 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1839 tpd = NULL;
1840 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1841 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1842 tpd = __tpd;
1843 list_del(&__tpd->entry);
1844 break;
1845 }
1846 }
1847
1848 if (tpd == NULL) {
1849 hprintk("unable to locate tpd for dma buffer %x\n",
1850 TBRQ_TPD(he_dev->tbrq_head));
1851 goto next_tbrq_entry;
1852 }
1853
1854 if (TBRQ_EOS(he_dev->tbrq_head)) {
1855 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1856 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1857 if (tpd->vcc)
1858 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1859
1860 goto next_tbrq_entry;
1861 }
1862
1863 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1864 if (tpd->iovec[slot].addr)
1865 pci_unmap_single(he_dev->pci_dev,
1866 tpd->iovec[slot].addr,
1867 tpd->iovec[slot].len & TPD_LEN_MASK,
1868 PCI_DMA_TODEVICE);
1869 if (tpd->iovec[slot].len & TPD_LST)
1870 break;
1871
1872 }
1873
1874 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1875 if (tpd->vcc && tpd->vcc->pop)
1876 tpd->vcc->pop(tpd->vcc, tpd->skb);
1877 else
1878 dev_kfree_skb_any(tpd->skb);
1879 }
1880
1881 next_tbrq_entry:
1882 if (tpd)
1883 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1884 he_dev->tbrq_head = (struct he_tbrq *)
1885 ((unsigned long) he_dev->tbrq_base |
1886 TBRQ_MASK(++he_dev->tbrq_head));
1887 }
1888
1889 if (updated) {
1890 if (updated > he_dev->tbrq_peak)
1891 he_dev->tbrq_peak = updated;
1892
1893 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1894 G0_TBRQ_H + (group * 16));
1895 }
1896 }
1897
1898 static void
1899 he_service_rbpl(struct he_dev *he_dev, int group)
1900 {
1901 struct he_rbp *new_tail;
1902 struct he_rbp *rbpl_head;
1903 struct he_buff *heb;
1904 dma_addr_t mapping;
1905 int i;
1906 int moved = 0;
1907
1908 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1909 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1910
1911 for (;;) {
1912 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1913 RBPL_MASK(he_dev->rbpl_tail+1));
1914
1915 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1916 if (new_tail == rbpl_head)
1917 break;
1918
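		/* find a free rbpl_virt[] slot: scan the allocation bitmap
		 * from the last hint, falling back to a full scan from bit 0
		 * before giving up when the table is exhausted */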
1919 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1920 if (i > (RBPL_TABLE_SIZE - 1)) {
1921 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1922 if (i > (RBPL_TABLE_SIZE - 1))
1923 break;
1924 }
1925 he_dev->rbpl_hint = i + 1;
1926
1927 heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1928 if (!heb)
1929 break;
1930 heb->mapping = mapping;
1931 list_add(&heb->entry, &he_dev->rbpl_outstanding);
1932 he_dev->rbpl_virt[i] = heb;
1933 set_bit(i, he_dev->rbpl_table);
1934 new_tail->idx = i << RBP_IDX_OFFSET;
1935 new_tail->phys = mapping + offsetof(struct he_buff, data);
1936
1937 he_dev->rbpl_tail = new_tail;
1938 ++moved;
1939 }
1940
1941 if (moved)
1942 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1943 }
1944
1945 static void
1946 he_tasklet(unsigned long data)
1947 {
1948 unsigned long flags;
1949 struct he_dev *he_dev = (struct he_dev *) data;
1950 int group, type;
1951 int updated = 0;
1952
1953 HPRINTK("tasklet (0x%lx)\n", data);
1954 spin_lock_irqsave(&he_dev->global_lock, flags);
1955
1956 while (he_dev->irq_head != he_dev->irq_tail) {
1957 ++updated;
1958
1959 type = ITYPE_TYPE(he_dev->irq_head->isw);
1960 group = ITYPE_GROUP(he_dev->irq_head->isw);
1961
1962 switch (type) {
1963 case ITYPE_RBRQ_THRESH:
1964 HPRINTK("rbrq%d threshold\n", group);
1965 /* fall through */
1966 case ITYPE_RBRQ_TIMER:
1967 if (he_service_rbrq(he_dev, group))
1968 he_service_rbpl(he_dev, group);
1969 break;
1970 case ITYPE_TBRQ_THRESH:
1971 HPRINTK("tbrq%d threshold\n", group);
1972 /* fall through */
1973 case ITYPE_TPD_COMPLETE:
1974 he_service_tbrq(he_dev, group);
1975 break;
1976 case ITYPE_RBPL_THRESH:
1977 he_service_rbpl(he_dev, group);
1978 break;
1979 case ITYPE_RBPS_THRESH:
1980 			/* shouldn't happen unless small buffers are enabled */
1981 break;
1982 case ITYPE_PHY:
1983 HPRINTK("phy interrupt\n");
1984 #ifdef CONFIG_ATM_HE_USE_SUNI
1985 spin_unlock_irqrestore(&he_dev->global_lock, flags);
1986 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1987 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1988 spin_lock_irqsave(&he_dev->global_lock, flags);
1989 #endif
1990 break;
1991 case ITYPE_OTHER:
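			/* for ITYPE_OTHER the group field extends the type
			 * code, so type|group recovers the full identifier */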
1992 switch (type|group) {
1993 case ITYPE_PARITY:
1994 hprintk("parity error\n");
1995 break;
1996 case ITYPE_ABORT:
1997 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1998 break;
1999 }
2000 break;
2001 case ITYPE_TYPE(ITYPE_INVALID):
2002 /* see 8.1.1 -- check all queues */
2003
2004 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2005
2006 he_service_rbrq(he_dev, 0);
2007 he_service_rbpl(he_dev, 0);
2008 he_service_tbrq(he_dev, 0);
2009 break;
2010 default:
2011 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2012 }
2013
2014 he_dev->irq_head->isw = ITYPE_INVALID;
2015
2016 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2017 }
2018
2019 if (updated) {
2020 if (updated > he_dev->irq_peak)
2021 he_dev->irq_peak = updated;
2022
2023 he_writel(he_dev,
2024 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2025 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2026 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2027 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2028 }
2029 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2030 }
2031
2032 static irqreturn_t
2033 he_irq_handler(int irq, void *dev_id)
2034 {
2035 unsigned long flags;
2036 	struct he_dev *he_dev = (struct he_dev *)dev_id;
2037 int handled = 0;
2038
2039 if (he_dev == NULL)
2040 return IRQ_NONE;
2041
2042 spin_lock_irqsave(&he_dev->global_lock, flags);
2043
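	/* the adapter posts the irq queue tail offset (in words, hence
	 * the << 2) into host memory, apparently to spare a PCI read per
	 * interrupt; the IRQ0_BASE read below is only a fallback when the
	 * posted value looks stale */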
2044 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2045 (*he_dev->irq_tailoffset << 2));
2046
2047 if (he_dev->irq_tail == he_dev->irq_head) {
2048 HPRINTK("tailoffset not updated?\n");
2049 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2050 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2051 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2052 }
2053
2054 #ifdef DEBUG
2055 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2056 hprintk("spurious (or shared) interrupt?\n");
2057 #endif
2058
2059 if (he_dev->irq_head != he_dev->irq_tail) {
2060 handled = 1;
2061 tasklet_schedule(&he_dev->tasklet);
2062 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2063 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2064 }
2065 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2066 return IRQ_RETVAL(handled);
2067
2068 }
2069
2070 static __inline__ void
2071 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2072 {
2073 struct he_tpdrq *new_tail;
2074
2075 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2076 tpd, cid, he_dev->tpdrq_tail);
2077
2078 /* new_tail = he_dev->tpdrq_tail; */
2079 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2080 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2081
2082 /*
2083 * check to see if we are about to set the tail == head
2084 * if true, update the head pointer from the adapter
2085 * to see if this is really the case (reading the queue
2086 * head for every enqueue would be unnecessarily slow)
2087 */
2088
2089 if (new_tail == he_dev->tpdrq_head) {
2090 he_dev->tpdrq_head = (struct he_tpdrq *)
2091 (((unsigned long)he_dev->tpdrq_base) |
2092 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2093
2094 if (new_tail == he_dev->tpdrq_head) {
2095 int slot;
2096
2097 hprintk("tpdrq full (cid 0x%x)\n", cid);
2098 /*
2099 * FIXME
2100 * push tpd onto a transmit backlog queue
2101 * after service_tbrq, service the backlog
2102 * for now, we just drop the pdu
2103 */
2104 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2105 if (tpd->iovec[slot].addr)
2106 pci_unmap_single(he_dev->pci_dev,
2107 tpd->iovec[slot].addr,
2108 tpd->iovec[slot].len & TPD_LEN_MASK,
2109 PCI_DMA_TODEVICE);
2110 }
2111 if (tpd->skb) {
2112 if (tpd->vcc->pop)
2113 tpd->vcc->pop(tpd->vcc, tpd->skb);
2114 else
2115 dev_kfree_skb_any(tpd->skb);
2116 atomic_inc(&tpd->vcc->stats->tx_err);
2117 }
2118 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2119 return;
2120 }
2121 }
2122
2123 /* 2.1.5 transmit packet descriptor ready queue */
2124 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2125 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2126 he_dev->tpdrq_tail->cid = cid;
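	/* make sure the tpd address and cid are in host memory before the
	 * TPDRQ_T write below hands the new tail to the adapter */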
2127 wmb();
2128
2129 he_dev->tpdrq_tail = new_tail;
2130
2131 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2132 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2133 }
2134
2135 static int
2136 he_open(struct atm_vcc *vcc)
2137 {
2138 unsigned long flags;
2139 struct he_dev *he_dev = HE_DEV(vcc->dev);
2140 struct he_vcc *he_vcc;
2141 int err = 0;
2142 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2143 short vpi = vcc->vpi;
2144 int vci = vcc->vci;
2145
2146 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2147 return 0;
2148
2149 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2150
2151 set_bit(ATM_VF_ADDR, &vcc->flags);
2152
2153 cid = he_mkcid(he_dev, vpi, vci);
2154
2155 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2156 if (he_vcc == NULL) {
2157 hprintk("unable to allocate he_vcc during open\n");
2158 return -ENOMEM;
2159 }
2160
2161 INIT_LIST_HEAD(&he_vcc->buffers);
2162 he_vcc->pdu_len = 0;
2163 he_vcc->rc_index = -1;
2164
2165 init_waitqueue_head(&he_vcc->rx_waitq);
2166 init_waitqueue_head(&he_vcc->tx_waitq);
2167
2168 vcc->dev_data = he_vcc;
2169
2170 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2171 int pcr_goal;
2172
2173 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2174 if (pcr_goal == 0)
2175 pcr_goal = he_dev->atm_dev->link_rate;
2176 if (pcr_goal < 0) /* means round down, technically */
2177 pcr_goal = -pcr_goal;
2178
2179 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2180
2181 switch (vcc->qos.aal) {
2182 case ATM_AAL5:
2183 tsr0_aal = TSR0_AAL5;
2184 tsr4 = TSR4_AAL5;
2185 break;
2186 case ATM_AAL0:
2187 tsr0_aal = TSR0_AAL0_SDU;
2188 tsr4 = TSR4_AAL0_SDU;
2189 break;
2190 default:
2191 err = -EINVAL;
2192 goto open_failed;
2193 }
2194
2195 spin_lock_irqsave(&he_dev->global_lock, flags);
2196 tsr0 = he_readl_tsr0(he_dev, cid);
2197 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2198
2199 if (TSR0_CONN_STATE(tsr0) != 0) {
2200 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2201 err = -EBUSY;
2202 goto open_failed;
2203 }
2204
2205 switch (vcc->qos.txtp.traffic_class) {
2206 case ATM_UBR:
2207 /* 2.3.3.1 open connection ubr */
2208
2209 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2210 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2211 break;
2212
2213 case ATM_CBR:
2214 /* 2.3.3.2 open connection cbr */
2215
2216 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2217 if ((he_dev->total_bw + pcr_goal)
2218 > (he_dev->atm_dev->link_rate * 9 / 10))
2219 {
2220 err = -EBUSY;
2221 goto open_failed;
2222 }
2223
2224 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2225
2226 /* find an unused cs_stper register */
2227 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2228 if (he_dev->cs_stper[reg].inuse == 0 ||
2229 he_dev->cs_stper[reg].pcr == pcr_goal)
2230 break;
2231
2232 if (reg == HE_NUM_CS_STPER) {
2233 err = -EBUSY;
2234 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2235 goto open_failed;
2236 }
2237
2238 he_dev->total_bw += pcr_goal;
2239
2240 he_vcc->rc_index = reg;
2241 ++he_dev->cs_stper[reg].inuse;
2242 he_dev->cs_stper[reg].pcr = pcr_goal;
2243
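			/* the rate-controller period is core-clock ticks per
			 * cell (66.67MHz on 622Mb/s parts, 50MHz otherwise);
			 * the CS_STPER mailbox write below uses period/2,
			 * apparently in half-tick units */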
2244 clock = he_is622(he_dev) ? 66667000 : 50000000;
2245 period = clock / pcr_goal;
2246
2247 HPRINTK("rc_index = %d period = %d\n",
2248 reg, period);
2249
2250 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2251 CS_STPER0 + reg);
2252 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2253
2254 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2255 TSR0_RC_INDEX(reg);
2256
2257 break;
2258 default:
2259 err = -EINVAL;
2260 goto open_failed;
2261 }
2262
2263 spin_lock_irqsave(&he_dev->global_lock, flags);
2264
2265 he_writel_tsr0(he_dev, tsr0, cid);
2266 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2267 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2268 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2269 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2270 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2271
2272 he_writel_tsr3(he_dev, 0x0, cid);
2273 he_writel_tsr5(he_dev, 0x0, cid);
2274 he_writel_tsr6(he_dev, 0x0, cid);
2275 he_writel_tsr7(he_dev, 0x0, cid);
2276 he_writel_tsr8(he_dev, 0x0, cid);
2277 he_writel_tsr10(he_dev, 0x0, cid);
2278 he_writel_tsr11(he_dev, 0x0, cid);
2279 he_writel_tsr12(he_dev, 0x0, cid);
2280 he_writel_tsr13(he_dev, 0x0, cid);
2281 he_writel_tsr14(he_dev, 0x0, cid);
2282 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2283 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2284 }
2285
2286 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2287 unsigned aal;
2288
2289 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2290 &HE_VCC(vcc)->rx_waitq);
2291
2292 switch (vcc->qos.aal) {
2293 case ATM_AAL5:
2294 aal = RSR0_AAL5;
2295 break;
2296 case ATM_AAL0:
2297 aal = RSR0_RAWCELL;
2298 break;
2299 default:
2300 err = -EINVAL;
2301 goto open_failed;
2302 }
2303
2304 spin_lock_irqsave(&he_dev->global_lock, flags);
2305
2306 rsr0 = he_readl_rsr0(he_dev, cid);
2307 if (rsr0 & RSR0_OPEN_CONN) {
2308 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2309
2310 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2311 err = -EBUSY;
2312 goto open_failed;
2313 }
2314
2315 rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2316 rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2317 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2318 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2319
2320 #ifdef USE_CHECKSUM_HW
2321 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2322 rsr0 |= RSR0_TCP_CKSUM;
2323 #endif
2324
2325 he_writel_rsr4(he_dev, rsr4, cid);
2326 he_writel_rsr1(he_dev, rsr1, cid);
2327 /* 5.1.11 last parameter initialized should be
2328 the open/closed indication in rsr0 */
2329 he_writel_rsr0(he_dev,
2330 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2331 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2332
2333 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2334 }
2335
2336 open_failed:
2337
2338 if (err) {
2339 kfree(he_vcc);
2340 clear_bit(ATM_VF_ADDR, &vcc->flags);
2341 }
2342 else
2343 set_bit(ATM_VF_READY, &vcc->flags);
2344
2345 return err;
2346 }
2347
2348 static void
2349 he_close(struct atm_vcc *vcc)
2350 {
2351 unsigned long flags;
2352 DECLARE_WAITQUEUE(wait, current);
2353 struct he_dev *he_dev = HE_DEV(vcc->dev);
2354 struct he_tpd *tpd;
2355 unsigned cid;
2356 struct he_vcc *he_vcc = HE_VCC(vcc);
2357 #define MAX_RETRY 30
2358 int retry = 0, sleep = 1, tx_inuse;
2359
2360 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2361
2362 clear_bit(ATM_VF_READY, &vcc->flags);
2363 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2364
2365 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2366 int timeout;
2367
2368 HPRINTK("close rx cid 0x%x\n", cid);
2369
2370 /* 2.7.2.2 close receive operation */
2371
2372 /* wait for previous close (if any) to finish */
2373
2374 spin_lock_irqsave(&he_dev->global_lock, flags);
2375 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2376 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2377 udelay(250);
2378 }
2379
2380 set_current_state(TASK_UNINTERRUPTIBLE);
2381 add_wait_queue(&he_vcc->rx_waitq, &wait);
2382
2383 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2384 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2385 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2386 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2387
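		/* wait up to 30s for the adapter to acknowledge the close;
		 * the rbrq service path should wake rx_waitq when the
		 * connection-closed entry for this cid arrives */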
2388 timeout = schedule_timeout(30*HZ);
2389
2390 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2391 set_current_state(TASK_RUNNING);
2392
2393 if (timeout == 0)
2394 hprintk("close rx timeout cid 0x%x\n", cid);
2395
2396 HPRINTK("close rx cid 0x%x complete\n", cid);
2397
2398 }
2399
2400 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2401 volatile unsigned tsr4, tsr0;
2402 int timeout;
2403
2404 HPRINTK("close tx cid 0x%x\n", cid);
2405
2406 /* 2.1.2
2407 *
2408 * ... the host must first stop queueing packets to the TPDRQ
2409 * on the connection to be closed, then wait for all outstanding
2410 * packets to be transmitted and their buffers returned to the
2411 * TBRQ. When the last packet on the connection arrives in the
2412 * TBRQ, the host issues the close command to the adapter.
2413 */
2414
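		/* sk_wmem_alloc is biased by one at socket init, so a value
		 * of 1 means no tx buffers are outstanding; poll with
		 * exponential backoff (capped at 250ms) while they drain */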
2415 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2416 (retry < MAX_RETRY)) {
2417 msleep(sleep);
2418 if (sleep < 250)
2419 sleep = sleep * 2;
2420
2421 ++retry;
2422 }
2423
2424 if (tx_inuse > 1)
2425 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2426
2427 /* 2.3.1.1 generic close operations with flush */
2428
2429 spin_lock_irqsave(&he_dev->global_lock, flags);
2430 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2431 /* also clears TSR4_SESSION_ENDED */
2432
2433 switch (vcc->qos.txtp.traffic_class) {
2434 case ATM_UBR:
2435 he_writel_tsr1(he_dev,
2436 TSR1_MCR(rate_to_atmf(200000))
2437 | TSR1_PCR(0), cid);
2438 break;
2439 case ATM_CBR:
2440 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2441 break;
2442 }
2443 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2444
2445 tpd = __alloc_tpd(he_dev);
2446 if (tpd == NULL) {
2447 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2448 goto close_tx_incomplete;
2449 }
2450 tpd->status |= TPD_EOS | TPD_INT;
2451 tpd->skb = NULL;
2452 tpd->vcc = vcc;
2453 wmb();
2454
2455 set_current_state(TASK_UNINTERRUPTIBLE);
2456 add_wait_queue(&he_vcc->tx_waitq, &wait);
2457 __enqueue_tpd(he_dev, tpd, cid);
2458 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2459
2460 timeout = schedule_timeout(30*HZ);
2461
2462 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2463 set_current_state(TASK_RUNNING);
2464
2465 spin_lock_irqsave(&he_dev->global_lock, flags);
2466
2467 if (timeout == 0) {
2468 hprintk("close tx timeout cid 0x%x\n", cid);
2469 goto close_tx_incomplete;
2470 }
2471
2472 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2473 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2474 udelay(250);
2475 }
2476
2477 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2478 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2479 udelay(250);
2480 }
2481
2482 close_tx_incomplete:
2483
2484 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2485 int reg = he_vcc->rc_index;
2486
2487 HPRINTK("cs_stper reg = %d\n", reg);
2488
2489 if (he_dev->cs_stper[reg].inuse == 0)
2490 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2491 else
2492 --he_dev->cs_stper[reg].inuse;
2493
2494 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2495 }
2496 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2497
2498 HPRINTK("close tx cid 0x%x complete\n", cid);
2499 }
2500
2501 kfree(he_vcc);
2502
2503 clear_bit(ATM_VF_ADDR, &vcc->flags);
2504 }
2505
2506 static int
2507 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2508 {
2509 unsigned long flags;
2510 struct he_dev *he_dev = HE_DEV(vcc->dev);
2511 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2512 struct he_tpd *tpd;
2513 #ifdef USE_SCATTERGATHER
2514 int i, slot = 0;
2515 #endif
2516
2517 #define HE_TPD_BUFSIZE 0xffff
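/* a tpd iovec length field appears to be 16 bits wide (cf. TPD_LEN_MASK),
   capping a single pdu at 0xffff bytes */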
2518
2519 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2520
2521 if ((skb->len > HE_TPD_BUFSIZE) ||
2522 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2523 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2524 if (vcc->pop)
2525 vcc->pop(vcc, skb);
2526 else
2527 dev_kfree_skb_any(skb);
2528 atomic_inc(&vcc->stats->tx_err);
2529 return -EINVAL;
2530 }
2531
2532 #ifndef USE_SCATTERGATHER
2533 if (skb_shinfo(skb)->nr_frags) {
2534 hprintk("no scatter/gather support\n");
2535 if (vcc->pop)
2536 vcc->pop(vcc, skb);
2537 else
2538 dev_kfree_skb_any(skb);
2539 atomic_inc(&vcc->stats->tx_err);
2540 return -EINVAL;
2541 }
2542 #endif
2543 spin_lock_irqsave(&he_dev->global_lock, flags);
2544
2545 tpd = __alloc_tpd(he_dev);
2546 if (tpd == NULL) {
2547 if (vcc->pop)
2548 vcc->pop(vcc, skb);
2549 else
2550 dev_kfree_skb_any(skb);
2551 atomic_inc(&vcc->stats->tx_err);
2552 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2553 return -ENOMEM;
2554 }
2555
2556 if (vcc->qos.aal == ATM_AAL5)
2557 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2558 else {
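		/* raw cell (aal0): the 52-byte sdu starts with a 4-byte
		 * header (no hec); byte 3 holds the pti/clp bits, which are
		 * copied into the tpd before the header is stripped -- the
		 * adapter presumably regenerates the header from the cid and
		 * the tpd cell-type/clp flags */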
2559 char *pti_clp = (void *) (skb->data + 3);
2560 int clp, pti;
2561
2562 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2563 clp = (*pti_clp & ATM_HDR_CLP);
2564 tpd->status |= TPD_CELLTYPE(pti);
2565 if (clp)
2566 tpd->status |= TPD_CLP;
2567
2568 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2569 }
2570
2571 #ifdef USE_SCATTERGATHER
2572 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2573 skb_headlen(skb), PCI_DMA_TODEVICE);
2574 tpd->iovec[slot].len = skb_headlen(skb);
2575 ++slot;
2576
2577 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2578 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2579
2580 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2581 tpd->vcc = vcc;
2582 			tpd->skb = NULL;	/* not the last fragment
2583 						   so don't ->push() yet */
2584 wmb();
2585
2586 __enqueue_tpd(he_dev, tpd, cid);
2587 tpd = __alloc_tpd(he_dev);
2588 if (tpd == NULL) {
2589 if (vcc->pop)
2590 vcc->pop(vcc, skb);
2591 else
2592 dev_kfree_skb_any(skb);
2593 atomic_inc(&vcc->stats->tx_err);
2594 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2595 return -ENOMEM;
2596 }
2597 tpd->status |= TPD_USERCELL;
2598 slot = 0;
2599 }
2600
2601 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2602 (void *) page_address(frag->page) + frag->page_offset,
2603 frag->size, PCI_DMA_TODEVICE);
2604 tpd->iovec[slot].len = frag->size;
2605 ++slot;
2606
2607 }
2608
2609 tpd->iovec[slot - 1].len |= TPD_LST;
2610 #else
2611 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2612 tpd->length0 = skb->len | TPD_LST;
2613 #endif
2614 tpd->status |= TPD_INT;
2615
2616 tpd->vcc = vcc;
2617 tpd->skb = skb;
2618 wmb();
2619 ATM_SKB(skb)->vcc = vcc;
2620
2621 __enqueue_tpd(he_dev, tpd, cid);
2622 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2623
2624 atomic_inc(&vcc->stats->tx);
2625
2626 return 0;
2627 }
2628
2629 static int
2630 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2631 {
2632 unsigned long flags;
2633 struct he_dev *he_dev = HE_DEV(atm_dev);
2634 struct he_ioctl_reg reg;
2635 int err = 0;
2636
2637 switch (cmd) {
2638 case HE_GET_REG:
2639 if (!capable(CAP_NET_ADMIN))
2640 return -EPERM;
2641
2642 if (copy_from_user(&reg, arg,
2643 sizeof(struct he_ioctl_reg)))
2644 return -EFAULT;
2645
2646 spin_lock_irqsave(&he_dev->global_lock, flags);
2647 switch (reg.type) {
2648 case HE_REGTYPE_PCI:
2649 if (reg.addr >= HE_REGMAP_SIZE) {
2650 err = -EINVAL;
2651 break;
2652 }
2653
2654 reg.val = he_readl(he_dev, reg.addr);
2655 break;
2656 case HE_REGTYPE_RCM:
2657 reg.val =
2658 he_readl_rcm(he_dev, reg.addr);
2659 break;
2660 case HE_REGTYPE_TCM:
2661 reg.val =
2662 he_readl_tcm(he_dev, reg.addr);
2663 break;
2664 case HE_REGTYPE_MBOX:
2665 reg.val =
2666 he_readl_mbox(he_dev, reg.addr);
2667 break;
2668 default:
2669 err = -EINVAL;
2670 break;
2671 }
2672 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2673 if (err == 0)
2674 if (copy_to_user(arg, &reg,
2675 sizeof(struct he_ioctl_reg)))
2676 return -EFAULT;
2677 break;
2678 default:
2679 #ifdef CONFIG_ATM_HE_USE_SUNI
2680 if (atm_dev->phy && atm_dev->phy->ioctl)
2681 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2682 #else /* CONFIG_ATM_HE_USE_SUNI */
2683 err = -EINVAL;
2684 #endif /* CONFIG_ATM_HE_USE_SUNI */
2685 break;
2686 }
2687
2688 return err;
2689 }
2690
2691 static void
2692 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2693 {
2694 unsigned long flags;
2695 struct he_dev *he_dev = HE_DEV(atm_dev);
2696
2697 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2698
2699 spin_lock_irqsave(&he_dev->global_lock, flags);
2700 he_writel(he_dev, val, FRAMER + (addr*4));
2701 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2702 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2703 }
2704
2705
2706 static unsigned char
2707 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2708 {
2709 unsigned long flags;
2710 struct he_dev *he_dev = HE_DEV(atm_dev);
2711 unsigned reg;
2712
2713 spin_lock_irqsave(&he_dev->global_lock, flags);
2714 reg = he_readl(he_dev, FRAMER + (addr*4));
2715 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2716
2717 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2718 return reg;
2719 }
2720
2721 static int
2722 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2723 {
2724 unsigned long flags;
2725 struct he_dev *he_dev = HE_DEV(dev);
2726 int left, i;
2727 #ifdef notdef
2728 struct he_rbrq *rbrq_tail;
2729 struct he_tpdrq *tpdrq_head;
2730 	int rbpl_head, rbpl_tail, inuse;
2731 #endif
2732 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2733
2734
2735 left = *pos;
2736 if (!left--)
2737 return sprintf(page, "ATM he driver\n");
2738
2739 if (!left--)
2740 return sprintf(page, "%s%s\n\n",
2741 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2742
2743 if (!left--)
2744 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2745
2746 spin_lock_irqsave(&he_dev->global_lock, flags);
2747 mcc += he_readl(he_dev, MCC);
2748 oec += he_readl(he_dev, OEC);
2749 dcc += he_readl(he_dev, DCC);
2750 cec += he_readl(he_dev, CEC);
2751 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2752
2753 if (!left--)
2754 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2755 mcc, oec, dcc, cec);
2756
2757 if (!left--)
2758 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2759 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2760
2761 if (!left--)
2762 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2763 CONFIG_TPDRQ_SIZE);
2764
2765 if (!left--)
2766 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2767 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2768
2769 if (!left--)
2770 return sprintf(page, "tbrq_size = %d peak = %d\n",
2771 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2772
2773
2774 #ifdef notdef
2775 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2776 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2777
2778 inuse = rbpl_head - rbpl_tail;
2779 if (inuse < 0)
2780 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2781 inuse /= sizeof(struct he_rbp);
2782
2783 if (!left--)
2784 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2785 CONFIG_RBPL_SIZE, inuse);
2786 #endif
2787
2788 if (!left--)
2789 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2790
2791 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2792 if (!left--)
2793 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2794 he_dev->cs_stper[i].pcr,
2795 he_dev->cs_stper[i].inuse);
2796
2797 if (!left--)
2798 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2799 he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2800
2801 return 0;
2802 }
2803
2804 /* eeprom routines -- see 4.7 */
2805
2806 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2807 {
2808 u32 val = 0, tmp_read = 0;
2809 int i, j = 0;
2810 u8 byte_read = 0;
2811
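	/* the serial eeprom is bit-banged through HOST_CNTL: readtab[]
	 * shifts out the READ opcode, the address bits go out msb-first
	 * with clock transitions supplied by clocktab[], and the data
	 * byte is sampled from ID_DOUT on the way back in */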
2812 val = readl(he_dev->membase + HOST_CNTL);
2813 val &= 0xFFFFE0FF;
2814
2815 /* Turn on write enable */
2816 val |= 0x800;
2817 he_writel(he_dev, val, HOST_CNTL);
2818
2819 /* Send READ instruction */
2820 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2821 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2822 udelay(EEPROM_DELAY);
2823 }
2824
2825 /* Next, we need to send the byte address to read from */
2826 for (i = 7; i >= 0; i--) {
2827 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2828 udelay(EEPROM_DELAY);
2829 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2830 udelay(EEPROM_DELAY);
2831 }
2832
2833 j = 0;
2834
2835 val &= 0xFFFFF7FF; /* Turn off write enable */
2836 he_writel(he_dev, val, HOST_CNTL);
2837
2838 /* Now, we can read data from the EEPROM by clocking it in */
2839 for (i = 7; i >= 0; i--) {
2840 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2841 udelay(EEPROM_DELAY);
2842 tmp_read = he_readl(he_dev, HOST_CNTL);
2843 byte_read |= (unsigned char)
2844 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2845 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2846 udelay(EEPROM_DELAY);
2847 }
2848
2849 he_writel(he_dev, val | ID_CS, HOST_CNTL);
2850 udelay(EEPROM_DELAY);
2851
2852 return byte_read;
2853 }
2854
2855 MODULE_LICENSE("GPL");
2856 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2857 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2858 module_param(disable64, bool, 0);
2859 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2860 module_param(nvpibits, short, 0);
2861 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2862 module_param(nvcibits, short, 0);
2863 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2864 module_param(rx_skb_reserve, short, 0);
2865 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2866 module_param(irq_coalesce, bool, 0);
2867 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2868 module_param(sdh, bool, 0);
2869 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2870
2871 static struct pci_device_id he_pci_tbl[] = {
2872 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
2873 0, 0, 0 },
2874 { 0, }
2875 };
2876
2877 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2878
2879 static struct pci_driver he_driver = {
2880 .name = "he",
2881 .probe = he_init_one,
2882 .remove = __devexit_p(he_remove_one),
2883 .id_table = he_pci_tbl,
2884 };
2885
2886 static int __init he_init(void)
2887 {
2888 return pci_register_driver(&he_driver);
2889 }
2890
2891 static void __exit he_cleanup(void)
2892 {
2893 pci_unregister_driver(&he_driver);
2894 }
2895
2896 module_init(he_init);
2897 module_exit(he_cleanup);