Merge branch 'hwmon-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jdelv...
[deliverable/linux.git] / drivers / isdn / hisax / hfc_pci.c
1 /* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
2 *
3 * low level driver for CCD's hfc-pci based cards
4 *
5 * Author Werner Cornelius
6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de>
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * For changes and modifications please read
14 * Documentation/isdn/HiSax.cert
15 *
16 */
17
18 #include <linux/init.h>
19 #include "hisax.h"
20 #include "hfc_pci.h"
21 #include "isdnl1.h"
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24
25 static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
26
/* table entry in the PCI devices list */
typedef struct {
	int vendor_id;		/* PCI vendor ID to match */
	int device_id;		/* PCI device ID to match */
	char *vendor_name;	/* vendor name, used for log output only */
	char *card_name;	/* card/model name, used for log output only */
} PCI_ENTRY;
34
35 #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
36 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
37 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
38
/* list of all HFC-PCI based cards known to this driver;
 * scanned linearly, terminated by the all-zero entry */
static const PCI_ENTRY id_list[] =
{
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
	{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
	{PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
	{PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
	{PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
	{PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
	{PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
	{PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
	{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,"Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
	{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
	{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
	{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
	{PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
	{0, 0, NULL, NULL},	/* terminator */
};
66
67
/******************************************/
/* free hardware resources used by driver */
/******************************************/
/*
 * Quiesces the chip (IRQ output off, reset pulse), disables the PCI
 * memory window and busmaster, stops the driver timer and releases the
 * shared FIFO DMA memory and the register mapping.  Order matters: the
 * chip must be silenced before the mapping it is written through goes away.
 */
static void
release_io_hfcpci(struct IsdnCardState *cs)
{
	printk(KERN_INFO "HiSax: release hfcpci at %p\n",
		cs->hw.hfcpci.pci_io);
	cs->hw.hfcpci.int_m2 = 0;	/* interrupt output off ! */
	Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
	Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET);	/* Reset On */
	mdelay(10);	/* let the reset pulse settle */
	Write_hfc(cs, HFCPCI_CIRM, 0);	/* Reset Off */
	mdelay(10);
	Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
	pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0);	/* disable memory mapped ports + busmaster */
	del_timer(&cs->hw.hfcpci.timer);
	/* release the 32k FIFO area shared with the chip via busmaster DMA */
	pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
		cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
	cs->hw.hfcpci.fifos = NULL;
	iounmap((void *)cs->hw.hfcpci.pci_io);
}
90
/********************************************************************************/
/* function called to reset the HFC PCI chip. A complete software reset of chip */
/* and fifos is done.                                                           */
/********************************************************************************/
/*
 * Brings the chip into a defined TE-mode default state: reset pulse,
 * D-FIFOs only, CLKDEL/SCTRL defaults, interrupt masks, ST state machine
 * forced to state 2 and GCI/IOM2 slot routing for both B channels.
 * The write ordering and the mdelay/udelay pauses follow the chip's
 * initialisation requirements — do not reorder.
 */
static void
reset_hfcpci(struct IsdnCardState *cs)
{
	pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO);	/* enable memory mapped ports, disable busmaster */
	cs->hw.hfcpci.int_m2 = 0;	/* interrupt output off ! */
	Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);

	printk(KERN_INFO "HFC_PCI: resetting card\n");
	pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER);	/* enable memory ports + busmaster */
	Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET);	/* Reset On */
	mdelay(10);
	Write_hfc(cs, HFCPCI_CIRM, 0);	/* Reset Off */
	mdelay(10);
	if (Read_hfc(cs, HFCPCI_STATUS) & 2)	/* chip still busy after reset? */
		printk(KERN_WARNING "HFC-PCI init bit busy\n");

	cs->hw.hfcpci.fifo_en = 0x30;	/* only D fifos enabled */
	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);

	cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK;	/* no echo connect , threshold */
	Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);

	Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE);	/* ST-Bit delay for TE-Mode */
	cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
	Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);	/* S/T Auto awake */
	cs->hw.hfcpci.bswapped = 0;	/* no exchange */
	cs->hw.hfcpci.nt_mode = 0;	/* we are in TE mode */
	cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
	Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);

	cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
		HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
	Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);

	/* Clear already pending ints */
	/* dummy read: reading INT_S1 acknowledges pending interrupt bits */
	if (Read_hfc(cs, HFCPCI_INT_S1));

	Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2);	/* HFC ST 2 */
	udelay(10);
	Write_hfc(cs, HFCPCI_STATES, 2);	/* HFC ST 2 */
	cs->hw.hfcpci.mst_m = HFCPCI_MASTER;	/* HFC Master Mode */

	Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
	cs->hw.hfcpci.sctrl = 0x40;	/* set tx_lo mode, error in datasheet ! */
	Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
	cs->hw.hfcpci.sctrl_r = 0;
	Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);

	/* Init GCI/IOM2 in master mode */
	/* Slots 0 and 1 are set for B-chan 1 and 2 */
	/* D- and monitor/CI channel are not enabled */
	/* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
	/* STIO2 is used as data input, B1+B2 from IOM->ST */
	/* ST B-channel send disabled -> continous 1s */
	/* The IOM slots are always enabled */
	cs->hw.hfcpci.conn = 0x36;	/* set data flow directions */
	Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
	Write_hfc(cs, HFCPCI_B1_SSL, 0x80);	/* B1-Slot 0 STIO1 out enabled */
	Write_hfc(cs, HFCPCI_B2_SSL, 0x81);	/* B2-Slot 1 STIO1 out enabled */
	Write_hfc(cs, HFCPCI_B1_RSL, 0x80);	/* B1-Slot 0 STIO2 in enabled */
	Write_hfc(cs, HFCPCI_B2_RSL, 0x81);	/* B2-Slot 1 STIO2 in enabled */

	/* Finally enable IRQ output */
	cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
	Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
	/* dummy read to acknowledge anything that became pending meanwhile */
	if (Read_hfc(cs, HFCPCI_INT_S1));
}
162
/***************************************************/
/* Timer function called when kernel timer expires */
/***************************************************/
/*
 * Only recomputes the next expiry value; the watchdog reset and the
 * timer re-arm below are intentionally commented out, so the timer is
 * effectively not re-armed from here.
 */
static void
hfcpci_Timer(struct IsdnCardState *cs)
{
	cs->hw.hfcpci.timer.expires = jiffies + 75;
	/* WD RESET */
/*	WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
	add_timer(&cs->hw.hfcpci.timer);
*/
}
175
176
177 /*********************************/
178 /* schedule a new D-channel task */
179 /*********************************/
180 static void
181 sched_event_D_pci(struct IsdnCardState *cs, int event)
182 {
183 test_and_set_bit(event, &cs->event);
184 schedule_work(&cs->tqueue);
185 }
186
187 /*********************************/
188 /* schedule a new b_channel task */
189 /*********************************/
190 static void
191 hfcpci_sched_event(struct BCState *bcs, int event)
192 {
193 test_and_set_bit(event, &bcs->event);
194 schedule_work(&bcs->tqueue);
195 }
196
197 /************************************************/
198 /* select a b-channel entry matching and active */
199 /************************************************/
200 static
201 struct BCState *
202 Sel_BCS(struct IsdnCardState *cs, int channel)
203 {
204 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
205 return (&cs->bcs[0]);
206 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
207 return (&cs->bcs[1]);
208 else
209 return (NULL);
210 }
211
/***************************************/
/* clear the desired B-channel rx fifo */
/***************************************/
/*
 * Resets the Z (byte) and F (frame) pointers of the RX FIFO for B-channel
 * 'fifo' (0 = B1, 1 = B2) in the shared FIFO memory.  If the FIFO is
 * currently enabled it is temporarily disabled around the pointer reset
 * so the chip does not use half-updated pointers.
 */
static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
{       u_char fifo_state;
	bzfifo_type *bzr;

	if (fifo) {
		bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
		fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
	} else {
		bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
		fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
	}
	if (fifo_state)	/* FIFO was enabled -> switch it off while clearing */
		cs->hw.hfcpci.fifo_en ^= fifo_state;
	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
	cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
	/* reset Z pointers to the end of the FIFO, both equal = empty */
	bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
	bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
	bzr->f1 = MAX_B_FRAMES;
	bzr->f2 = bzr->f1;	/* init F pointers to remain constant */
	if (fifo_state)	/* restore the original enable state */
		cs->hw.hfcpci.fifo_en |= fifo_state;
	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
}
238
/***************************************/
/* clear the desired B-channel tx fifo */
/***************************************/
/*
 * TX counterpart of hfcpci_clear_fifo_rx(): resets the Z and F pointers
 * of the TX FIFO for B-channel 'fifo' (0 = B1, 1 = B2), temporarily
 * disabling the FIFO while the pointers are rewritten.
 */
static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
{       u_char fifo_state;
	bzfifo_type *bzt;

	if (fifo) {
		bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
		fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
	} else {
		bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
		fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
	}
	if (fifo_state)	/* FIFO was enabled -> switch it off while clearing */
		cs->hw.hfcpci.fifo_en ^= fifo_state;
	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
	/* reset Z pointers to the end of the FIFO, both equal = empty */
	bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
	bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
	bzt->f1 = MAX_B_FRAMES;
	bzt->f2 = bzt->f1;	/* init F pointers to remain constant */
	if (fifo_state)	/* restore the original enable state */
		cs->hw.hfcpci.fifo_en |= fifo_state;
	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
}
264
/*********************************************/
/* read a complete B-frame out of the buffer */
/*********************************************/
/*
 * Copies one HDLC frame of 'count' bytes (including 2 CRC bytes and the
 * status byte) from the circular RX FIFO 'bz'/'bdata' into a fresh skb.
 * The frame is validated first: length must be 4..HSCX_BUFMAX+3 and the
 * trailing status byte (at z1) must be 0 (CRC ok).  In every case the
 * F/Z read pointers are advanced past the frame so the FIFO entry is
 * consumed even when the frame is dropped.  Returns the skb or NULL.
 */
static struct sk_buff
*
hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type * bz, u_char * bdata, int count)
{
	u_char *ptr, *ptr1, new_f2;
	struct sk_buff *skb;
	struct IsdnCardState *cs = bcs->cs;
	int total, maxlen, new_z2;
	z_type *zp;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "hfcpci_empty_fifo");
	zp = &bz->za[bz->f2];	/* point to Z-Regs */
	new_z2 = zp->z2 + count;	/* new position in fifo */
	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
	new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
	/* reject over/undersized frames and frames whose status byte
	 * (last byte, at z1) signals a CRC error */
	if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
	    (*(bdata + (zp->z1 - B_SUB_VAL)))) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
#ifdef ERROR_STATISTIC
		bcs->err_inv++;
#endif
		bz->za[new_f2].z2 = new_z2;
		bz->f2 = new_f2;	/* next buffer */
		skb = NULL;
	} else if (!(skb = dev_alloc_skb(count - 3)))
		printk(KERN_WARNING "HFCPCI: receive out of memory\n");
	else {
		total = count;
		count -= 3;	/* strip 2 CRC bytes + status byte */
		ptr = skb_put(skb, count);

		if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
			maxlen = count;	/* complete transfer */
		else
			maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2;	/* maximum */

		ptr1 = bdata + (zp->z2 - B_SUB_VAL);	/* start of data */
		memcpy(ptr, ptr1, maxlen);	/* copy data */
		count -= maxlen;

		if (count) {	/* rest remaining */
			ptr += maxlen;
			ptr1 = bdata;	/* start of buffer */
			memcpy(ptr, ptr1, count);	/* rest */
		}
		bz->za[new_f2].z2 = new_z2;
		bz->f2 = new_f2;	/* next buffer */

	}
	return (skb);
}
322
/*******************************/
/* D-channel receive procedure */
/*******************************/
/*
 * Drains up to 5 frames from the D-channel RX FIFO.  Each frame carries
 * 2 CRC bytes plus a status byte; the status byte (at z1) must be 0 for
 * the frame to be accepted.  Valid frames are queued on cs->rq and a
 * D_RCVBUFREADY event is scheduled.  Guarded by FLG_LOCK_ATOMIC against
 * concurrent FIFO access; always returns 1.
 */
static
int
receive_dmsg(struct IsdnCardState *cs)
{
	struct sk_buff *skb;
	int maxlen;
	int rcnt, total;
	int count = 5;	/* upper bound on frames handled per call */
	u_char *ptr, *ptr1;
	dfifo_type *df;
	z_type *zp;

	df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
	if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
		debugl1(cs, "rec_dmsg blocked");
		return (1);
	}
	/* loop while frames are pending (f1 != f2) and budget remains */
	while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
		zp = &df->za[df->f2 & D_FREG_MASK];
		rcnt = zp->z1 - zp->z2;
		if (rcnt < 0)
			rcnt += D_FIFO_SIZE;	/* circular buffer wrap */
		rcnt++;
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
				df->f1, df->f2, zp->z1, zp->z2, rcnt);

		/* drop over/undersized frames or frames with bad CRC status */
		if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
		    (df->data[zp->z1])) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]);
#ifdef ERROR_STATISTIC
			cs->err_rx++;
#endif
			/* consume the frame anyway: advance F and Z pointers */
			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1);	/* next buffer */
			df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
		} else if ((skb = dev_alloc_skb(rcnt - 3))) {
			total = rcnt;
			rcnt -= 3;	/* strip 2 CRC bytes + status byte */
			ptr = skb_put(skb, rcnt);

			if (zp->z2 + rcnt <= D_FIFO_SIZE)
				maxlen = rcnt;	/* complete transfer */
			else
				maxlen = D_FIFO_SIZE - zp->z2;	/* maximum */

			ptr1 = df->data + zp->z2;	/* start of data */
			memcpy(ptr, ptr1, maxlen);	/* copy data */
			rcnt -= maxlen;

			if (rcnt) {	/* rest remaining */
				ptr += maxlen;
				ptr1 = df->data;	/* start of buffer */
				memcpy(ptr, ptr1, rcnt);	/* rest */
			}
			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1);	/* next buffer */
			df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);

			skb_queue_tail(&cs->rq, skb);
			sched_event_D_pci(cs, D_RCVBUFREADY);
		} else
			printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
	}
	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	return (1);
}
392
/*******************************************************************************/
/* check for transparent receive data and read max one threshold size if avail */
/*******************************************************************************/
/*
 * Transparent-mode RX: copies at most HFCPCI_BTRANS_THRESHOLD buffered
 * bytes from the circular FIFO into an skb and queues it on bcs->rqueue.
 * Returns 0 when no data was available, 1 otherwise.  Note that the Z2
 * read pointer is advanced even when skb allocation fails, i.e. the
 * data is consumed (dropped) in the out-of-memory case.
 */
static int
hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type * bz, u_char * bdata)
{
	unsigned short *z1r, *z2r;
	int new_z2, fcnt, maxlen;
	struct sk_buff *skb;
	u_char *ptr, *ptr1;

	z1r = &bz->za[MAX_B_FRAMES].z1;	/* pointer to z reg */
	z2r = z1r + 1;

	if (!(fcnt = *z1r - *z2r))
		return (0);	/* no data avail */

	if (fcnt <= 0)
		fcnt += B_FIFO_SIZE;	/* bytes actually buffered */
	if (fcnt > HFCPCI_BTRANS_THRESHOLD)
		fcnt = HFCPCI_BTRANS_THRESHOLD;	/* limit size */

	new_z2 = *z2r + fcnt;	/* new position in fifo */
	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */

	if (!(skb = dev_alloc_skb(fcnt)))
		printk(KERN_WARNING "HFCPCI: receive out of memory\n");
	else {
		ptr = skb_put(skb, fcnt);
		if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
			maxlen = fcnt;	/* complete transfer */
		else
			maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r;	/* maximum */

		ptr1 = bdata + (*z2r - B_SUB_VAL);	/* start of data */
		memcpy(ptr, ptr1, maxlen);	/* copy data */
		fcnt -= maxlen;

		if (fcnt) {	/* rest remaining */
			ptr += maxlen;
			ptr1 = bdata;	/* start of buffer */
			memcpy(ptr, ptr1, fcnt);	/* rest */
		}
		skb_queue_tail(&bcs->rqueue, skb);
		hfcpci_sched_event(bcs, B_RCVBUFREADY);
	}

	*z2r = new_z2;	/* new position */
	return (1);
}				/* hfcpci_empty_fifo_trans */
444
/**********************************/
/* B-channel main receive routine */
/**********************************/
/*
 * Selects the RX FIFO for this B channel (honouring channel swap), then
 * repeatedly (up to 5 passes) pulls data: HDLC frames via
 * hfcpci_empty_fifo(), or transparent data via hfcpci_empty_fifo_trans()
 * when the FIFO has no complete frame.  If the chip's pending-frame count
 * went backwards by more than one (FIFO overrun indication), the RX FIFO
 * is cleared.  FLG_LOCK_ATOMIC serialises FIFO access per pass.
 */
static void
main_rec_hfcpci(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	int rcnt, real_fifo;
	int receive, count = 5;	/* at most 5 passes per invocation */
	struct sk_buff *skb;
	bzfifo_type *bz;
	u_char *bdata;
	z_type *zp;


	/* channel 1 normally maps to FIFO B2, unless channels are swapped */
	if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
		bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
		bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
		real_fifo = 1;
	} else {
		bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
		bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
		real_fifo = 0;
	}
      Begin:
	count--;
	if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
		debugl1(cs, "rec_data %d blocked", bcs->channel);
		return;
	}
	if (bz->f1 != bz->f2) {	/* at least one complete HDLC frame pending */
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
				bcs->channel, bz->f1, bz->f2);
		zp = &bz->za[bz->f2];

		rcnt = zp->z1 - zp->z2;
		if (rcnt < 0)
			rcnt += B_FIFO_SIZE;	/* circular buffer wrap */
		rcnt++;
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
				bcs->channel, zp->z1, zp->z2, rcnt);
		if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
			skb_queue_tail(&bcs->rqueue, skb);
			hfcpci_sched_event(bcs, B_RCVBUFREADY);
		}
		rcnt = bz->f1 - bz->f2;
		if (rcnt < 0)
			rcnt += MAX_B_FRAMES + 1;	/* F counter wrap */
		/* pending count dropped by more than one -> FIFO inconsistent,
		 * reset it */
		if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
			rcnt = 0;
			hfcpci_clear_fifo_rx(cs, real_fifo);
		}
		cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
		if (rcnt > 1)	/* more frames waiting -> another pass */
			receive = 1;
		else
			receive = 0;
	} else if (bcs->mode == L1_MODE_TRANS)
		receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
	else
		receive = 0;
	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	if (count && receive)
		goto Begin;
}
512
/**************************/
/* D-channel send routine */
/**************************/
/*
 * Copies cs->tx_skb into the D-channel TX FIFO if there is room (both a
 * free frame slot and enough free bytes), handling the circular-buffer
 * wrap with a two-part memcpy, then advances the F1/Z1 write pointers
 * and frees the skb.  Returns silently (skb kept for a retry) when the
 * FIFO is full.
 */
static void
hfcpci_fill_dfifo(struct IsdnCardState *cs)
{
	int fcnt;
	int count, new_z1, maxlen;
	dfifo_type *df;
	u_char *src, *dst, new_f1;

	if (!cs->tx_skb)
		return;
	if (cs->tx_skb->len <= 0)
		return;

	df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
			df->f1, df->f2,
			df->za[df->f1 & D_FREG_MASK].z1);
	fcnt = df->f1 - df->f2;	/* frame count actually buffered */
	if (fcnt < 0)
		fcnt += (MAX_D_FRAMES + 1);	/* if wrap around */
	if (fcnt > (MAX_D_FRAMES - 1)) {	/* no free frame slot */
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "hfcpci_fill_Dfifo more as 14 frames");
#ifdef ERROR_STATISTIC
		cs->err_tx++;
#endif
		return;
	}
	/* now determine free bytes in FIFO buffer */
	count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
	if (count <= 0)
		count += D_FIFO_SIZE;	/* count now contains available bytes */

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
			cs->tx_skb->len, count);
	if (count < cs->tx_skb->len) {	/* not enough room -> keep skb */
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
		return;
	}
	count = cs->tx_skb->len;	/* get frame len */
	new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
	new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
	src = cs->tx_skb->data;	/* source pointer */
	dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
	maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1;	/* end fifo */
	if (maxlen > count)
		maxlen = count;	/* limit size */
	memcpy(dst, src, maxlen);	/* first copy */

	count -= maxlen;	/* remaining bytes */
	if (count) {	/* wrapped: copy the rest from buffer start */
		dst = df->data;	/* start of buffer */
		src += maxlen;	/* new position */
		memcpy(dst, src, count);
	}
	df->za[new_f1 & D_FREG_MASK].z1 = new_z1;	/* for next buffer */
	df->za[df->f1 & D_FREG_MASK].z1 = new_z1;	/* new pos actual buffer */
	df->f1 = new_f1;	/* next frame */

	dev_kfree_skb_any(cs->tx_skb);
	cs->tx_skb = NULL;
}
582
/**************************/
/* B-channel send routine */
/**************************/
/*
 * Copies bcs->tx_skb into the TX FIFO of this B channel (honouring
 * channel swap).  Two modes:
 *  - transparent (L1_MODE_TRANS): streams as many queued skbs as fit
 *    below the 2*threshold watermark, advancing only the Z1 pointer;
 *    frames too large for the remaining space are discarded;
 *  - HDLC (default): writes one complete frame if a frame slot and
 *    enough bytes are free, then advances F1/Z1.
 * In both modes an ACK event is scheduled for upper layers that
 * requested L1 wakeup, except for PACKET_NOACK frames.
 */
static void
hfcpci_fill_fifo(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	int maxlen, fcnt;
	int count, new_z1;
	bzfifo_type *bz;
	u_char *bdata;
	u_char new_f1, *src, *dst;
	unsigned short *z1t, *z2t;

	if (!bcs->tx_skb)
		return;
	if (bcs->tx_skb->len <= 0)
		return;

	/* channel 1 normally maps to FIFO B2, unless channels are swapped */
	if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
		bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
		bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
	} else {
		bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
		bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
	}

	if (bcs->mode == L1_MODE_TRANS) {
		z1t = &bz->za[MAX_B_FRAMES].z1;
		z2t = z1t + 1;
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
				bcs->channel, *z1t, *z2t);
		fcnt = *z2t - *z1t;
		if (fcnt <= 0)
			fcnt += B_FIFO_SIZE;	/* fcnt contains available bytes in fifo */
		fcnt = B_FIFO_SIZE - fcnt;	/* remaining bytes to send */

		/* keep the FIFO filled up to twice the transmit threshold */
		while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
			if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
				/* data is suitable for fifo */
				count = bcs->tx_skb->len;

				new_z1 = *z1t + count;	/* new buffer Position */
				if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
					new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
				src = bcs->tx_skb->data;	/* source pointer */
				dst = bdata + (*z1t - B_SUB_VAL);
				maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t;	/* end of fifo */
				if (maxlen > count)
					maxlen = count;	/* limit size */
				memcpy(dst, src, maxlen);	/* first copy */

				count -= maxlen;	/* remaining bytes */
				if (count) {	/* wrapped: rest from buffer start */
					dst = bdata;	/* start of buffer */
					src += maxlen;	/* new position */
					memcpy(dst, src, count);
				}
				bcs->tx_cnt -= bcs->tx_skb->len;
				fcnt += bcs->tx_skb->len;
				*z1t = new_z1;	/* now send data */
			} else if (cs->debug & L1_DEB_HSCX)
				debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
					bcs->channel, bcs->tx_skb->len);

			/* account the bytes for upper-layer flow control ACK */
			if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
				(PACKET_NOACK != bcs->tx_skb->pkt_type)) {
				u_long	flags;
				spin_lock_irqsave(&bcs->aclock, flags);
				bcs->ackcnt += bcs->tx_skb->len;
				spin_unlock_irqrestore(&bcs->aclock, flags);
				schedule_event(bcs, B_ACKPENDING);
			}

			dev_kfree_skb_any(bcs->tx_skb);
			bcs->tx_skb = skb_dequeue(&bcs->squeue);	/* fetch next data */
		}
		test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		return;
	}
	if (cs->debug & L1_DEB_HSCX)
		debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
			bcs->channel, bz->f1, bz->f2,
			bz->za[bz->f1].z1);

	fcnt = bz->f1 - bz->f2;	/* frame count actually buffered */
	if (fcnt < 0)
		fcnt += (MAX_B_FRAMES + 1);	/* if wrap around */
	if (fcnt > (MAX_B_FRAMES - 1)) {	/* no free frame slot */
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "hfcpci_fill_Bfifo more as 14 frames");
		return;
	}
	/* now determine free bytes in FIFO buffer */
	count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
	if (count <= 0)
		count += B_FIFO_SIZE;	/* count now contains available bytes */

	if (cs->debug & L1_DEB_HSCX)
		debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
			bcs->channel, bcs->tx_skb->len,
			count, current->state);

	if (count < bcs->tx_skb->len) {	/* not enough room -> keep skb */
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "hfcpci_fill_fifo no fifo mem");
		return;
	}
	count = bcs->tx_skb->len;	/* get frame len */
	new_z1 = bz->za[bz->f1].z1 + count;	/* new buffer Position */
	if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
		new_z1 -= B_FIFO_SIZE;	/* buffer wrap */

	new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
	src = bcs->tx_skb->data;	/* source pointer */
	dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
	maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1;	/* end fifo */
	if (maxlen > count)
		maxlen = count;	/* limit size */
	memcpy(dst, src, maxlen);	/* first copy */

	count -= maxlen;	/* remaining bytes */
	if (count) {	/* wrapped: copy the rest from buffer start */
		dst = bdata;	/* start of buffer */
		src += maxlen;	/* new position */
		memcpy(dst, src, count);
	}
	bcs->tx_cnt -= bcs->tx_skb->len;
	/* account the bytes for upper-layer flow control ACK */
	if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
		(PACKET_NOACK != bcs->tx_skb->pkt_type)) {
		u_long	flags;
		spin_lock_irqsave(&bcs->aclock, flags);
		bcs->ackcnt += bcs->tx_skb->len;
		spin_unlock_irqrestore(&bcs->aclock, flags);
		schedule_event(bcs, B_ACKPENDING);
	}

	bz->za[new_f1].z1 = new_z1;	/* for next buffer */
	bz->f1 = new_f1;	/* next frame */

	dev_kfree_skb_any(bcs->tx_skb);
	bcs->tx_skb = NULL;
	test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
}
728
729 /**********************************************/
730 /* D-channel l1 state call for leased NT-mode */
731 /**********************************************/
732 static void
733 dch_nt_l2l1(struct PStack *st, int pr, void *arg)
734 {
735 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
736
737 switch (pr) {
738 case (PH_DATA | REQUEST):
739 case (PH_PULL | REQUEST):
740 case (PH_PULL | INDICATION):
741 st->l1.l1hw(st, pr, arg);
742 break;
743 case (PH_ACTIVATE | REQUEST):
744 st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
745 break;
746 case (PH_TESTLOOP | REQUEST):
747 if (1 & (long) arg)
748 debugl1(cs, "PH_TEST_LOOP B1");
749 if (2 & (long) arg)
750 debugl1(cs, "PH_TEST_LOOP B2");
751 if (!(3 & (long) arg))
752 debugl1(cs, "PH_TEST_LOOP DISABLED");
753 st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
754 break;
755 default:
756 if (cs->debug)
757 debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
758 break;
759 }
760 }
761
762
763
/***********************/
/* set/reset echo mode */
/***********************/
/*
 * ioctl-style auxiliary command:
 *  - arg == 98: switch the port into leased NT mode (only if no B-channel
 *    interrupts are currently enabled) — reprograms CLKDEL/SCTRL, walks
 *    the ST state machine to G1 and redirects L2->L1 to dch_nt_l2l1;
 *  - arg == 12: enable (i != 0) or disable D/E-channel echo logging on
 *    the B2 receive FIFO; refused in NT mode, with swapped channels or
 *    when more than one channel is in use.
 * Returns 0 on success, -EINVAL for an unsupported request.
 */
static int
hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl * ic)
{
	u_long	flags;
	int i = *(unsigned int *) ic->parm.num;

	if ((ic->arg == 98) &&
	    (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
		spin_lock_irqsave(&cs->lock, flags);
		Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT);	/* ST-Bit delay for NT-Mode */
		Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0);	/* HFC ST G0 */
		udelay(10);
		cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
		Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);	/* set NT-mode */
		udelay(10);
		Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1);	/* HFC ST G1 */
		udelay(10);
		Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
		cs->dc.hfcpci.ph_state = 1;
		cs->hw.hfcpci.nt_mode = 1;
		cs->hw.hfcpci.nt_timer = 0;
		cs->stlist->l2.l2l1 = dch_nt_l2l1;	/* reroute L2->L1 calls */
		spin_unlock_irqrestore(&cs->lock, flags);
		debugl1(cs, "NT mode activated");
		return (0);
	}
	if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
	    (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
		return (-EINVAL);

	spin_lock_irqsave(&cs->lock, flags);
	if (i) {
		cs->logecho = 1;
		cs->hw.hfcpci.trm |= 0x20;	/* enable echo chan */
		cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
		cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
	} else {
		cs->logecho = 0;
		cs->hw.hfcpci.trm &= ~0x20;	/* disable echo chan */
		cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
		cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
	}
	/* B2 transmit is taken away from the line in echo mode */
	cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
	cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
	cs->hw.hfcpci.conn |= 0x10;	/* B2-IOM -> B2-ST */
	cs->hw.hfcpci.ctmt &= ~2;
	Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
	Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
	Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
	Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
	Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
	Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
	spin_unlock_irqrestore(&cs->lock, flags);
	return (0);
}				/* hfcpci_auxcmd */
823
/*****************************/
/* E-channel receive routine */
/*****************************/
/*
 * Echo-channel logging: drains up to 5 frames from the B2 RX FIFO (which
 * carries the echoed D channel when echo mode is on, see hfcpci_auxcmd)
 * into a stack buffer and emits them hex-dumped via HiSax_putstatus.
 * FIFO pointer handling mirrors hfcpci_empty_fifo()/main_rec_hfcpci().
 */
static void
receive_emsg(struct IsdnCardState *cs)
{
	int rcnt;
	int receive, count = 5;	/* at most 5 passes per invocation */
	bzfifo_type *bz;
	u_char *bdata;
	z_type *zp;
	u_char *ptr, *ptr1, new_f2;
	int total, maxlen, new_z2;
	u_char e_buffer[256];

	bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
	bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
      Begin:
	count--;
	if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
		debugl1(cs, "echo_rec_data blocked");
		return;
	}
	if (bz->f1 != bz->f2) {	/* at least one complete frame pending */
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
				bz->f1, bz->f2);
		zp = &bz->za[bz->f2];

		rcnt = zp->z1 - zp->z2;
		if (rcnt < 0)
			rcnt += B_FIFO_SIZE;	/* circular buffer wrap */
		rcnt++;
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
				zp->z1, zp->z2, rcnt);
		new_z2 = zp->z2 + rcnt;	/* new position in fifo */
		if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
			new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
		new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
		/* NOTE(review): this guard tests the loop counter 'count'
		 * instead of the frame length 'rcnt'; the analogous check in
		 * hfcpci_empty_fifo() uses the length (rcnt < 4 expected here)
		 * -- confirm against upstream before changing */
		if ((rcnt > 256 + 3) || (count < 4) ||
		    (*(bdata + (zp->z1 - B_SUB_VAL)))) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
			bz->za[new_f2].z2 = new_z2;
			bz->f2 = new_f2;	/* next buffer */
		} else {
			total = rcnt;
			rcnt -= 3;	/* strip 2 CRC bytes + status byte */
			ptr = e_buffer;

			/* NOTE(review): hfcpci_empty_fifo() tests
			 * (zp->z2 + count <= ...) here; this variant omits the
			 * '+ rcnt' term -- presumably works because frames fit,
			 * but verify the wrap handling against upstream */
			if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
				maxlen = rcnt;	/* complete transfer */
			else
				maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2;	/* maximum */

			ptr1 = bdata + (zp->z2 - B_SUB_VAL);	/* start of data */
			memcpy(ptr, ptr1, maxlen);	/* copy data */
			rcnt -= maxlen;

			if (rcnt) {	/* rest remaining */
				ptr += maxlen;
				ptr1 = bdata;	/* start of buffer */
				memcpy(ptr, ptr1, rcnt);	/* rest */
			}
			bz->za[new_f2].z2 = new_z2;
			bz->f2 = new_f2;	/* next buffer */
			if (cs->debug & DEB_DLOG_HEX) {
				ptr = cs->dlog;
				if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
					*ptr++ = 'E';
					*ptr++ = 'C';
					*ptr++ = 'H';
					*ptr++ = 'O';
					*ptr++ = ':';
					ptr += QuickHex(ptr, e_buffer, total - 3);
					ptr--;
					*ptr++ = '\n';
					*ptr = 0;
					HiSax_putstatus(cs, NULL, cs->dlog);
				} else
					HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
			}
		}

		rcnt = bz->f1 - bz->f2;
		if (rcnt < 0)
			rcnt += MAX_B_FRAMES + 1;	/* F counter wrap */
		if (rcnt > 1)	/* more frames waiting -> another pass */
			receive = 1;
		else
			receive = 0;
	} else
		receive = 0;
	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	if (count && receive)
		goto Begin;
}				/* receive_emsg */
922
/*********************/
/* Interrupt handler */
/*********************/
/*
 * Main IRQ handler for the HFC-PCI chip. Reads and acknowledges the
 * interrupt status, then dispatches: 0x40 state machine change, 0x80
 * timer, 0x08/0x10 B-channel receive, 0x01/0x02 B-channel transmit
 * done, 0x20 D-channel receive, 0x04 D-channel transmit done.
 * Runs entirely under cs->lock; returns IRQ_NONE when the interrupt
 * was not raised by this (shared-line) device.
 */
static irqreturn_t
hfcpci_interrupt(int intno, void *dev_id)
{
	u_long flags;
	struct IsdnCardState *cs = dev_id;
	u_char exval;
	struct BCState *bcs;
	int count = 15;		/* bound on deferred-event redispatch loops */
	u_char val, stat;

	/* int_m2 bit 3 is only set once the card is fully initialised;
	 * before that the IRQ cannot be ours */
	if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
		debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
		return IRQ_NONE;	/* not initialised */
	}
	spin_lock_irqsave(&cs->lock, flags);
	/* reading INT_S1 fetches and acknowledges the pending bits */
	if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
		val = Read_hfc(cs, HFCPCI_INT_S1);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
	} else {
		/* shared IRQ line: not raised by this chip */
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "HFC-PCI irq %x %s", val,
			test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
			"locked" : "unlocked");
	val &= cs->hw.hfcpci.int_m1;	/* only bits we enabled */
	if (val & 0x40) {	/* state machine irq */
		exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
				exval);
		cs->dc.hfcpci.ph_state = exval;
		/* real handling is deferred to hfcpci_bh() */
		sched_event_D_pci(cs, D_L1STATECHANGE);
		val &= ~0x40;
	}
	if (val & 0x80) {	/* timer irq */
		if (cs->hw.hfcpci.nt_mode) {
			/* NT G2 timeout countdown (see NT_T1_COUNT) */
			if ((--cs->hw.hfcpci.nt_timer) < 0)
				sched_event_D_pci(cs, D_L1STATECHANGE);
		}
		val &= ~0x80;
		Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
	}
	while (val) {
		if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			/* fifo access in progress elsewhere: stash the
			 * events in int_s1 and bail out; they will be
			 * replayed on a later pass */
			cs->hw.hfcpci.int_s1 |= val;
			spin_unlock_irqrestore(&cs->lock, flags);
			return IRQ_HANDLED;
		}
		if (cs->hw.hfcpci.int_s1 & 0x18) {
			/* deferred receive events pending: swap them in
			 * so they are handled before the fresh ones */
			exval = val;
			val = cs->hw.hfcpci.int_s1;
			cs->hw.hfcpci.int_s1 = exval;
		}
		if (val & 0x08) {	/* B1 (or swapped) receive */
			if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
				if (cs->debug)
					debugl1(cs, "hfcpci spurious 0x08 IRQ");
			} else
				main_rec_hfcpci(bcs);
		}
		if (val & 0x10) {	/* B2 receive (or D echo logging) */
			if (cs->logecho)
				receive_emsg(cs);
			else if (!(bcs = Sel_BCS(cs, 1))) {
				if (cs->debug)
					debugl1(cs, "hfcpci spurious 0x10 IRQ");
			} else
				main_rec_hfcpci(bcs);
		}
		if (val & 0x01) {	/* B1 (or swapped) transmit done */
			if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
				if (cs->debug)
					debugl1(cs, "hfcpci spurious 0x01 IRQ");
			} else {
				if (bcs->tx_skb) {
					/* continue the current frame */
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfcpci_fill_fifo(bcs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else
						debugl1(cs, "fill_data %d blocked", bcs->channel);
				} else {
					/* frame finished: start next queued one */
					if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
						if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
							hfcpci_fill_fifo(bcs);
							test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
						} else
							debugl1(cs, "fill_data %d blocked", bcs->channel);
					} else {
						hfcpci_sched_event(bcs, B_XMTBUFREADY);
					}
				}
			}
		}
		if (val & 0x02) {	/* B2 transmit done */
			if (!(bcs = Sel_BCS(cs, 1))) {
				if (cs->debug)
					debugl1(cs, "hfcpci spurious 0x02 IRQ");
			} else {
				if (bcs->tx_skb) {
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfcpci_fill_fifo(bcs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else
						debugl1(cs, "fill_data %d blocked", bcs->channel);
				} else {
					if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
						if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
							hfcpci_fill_fifo(bcs);
							test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
						} else
							debugl1(cs, "fill_data %d blocked", bcs->channel);
					} else {
						hfcpci_sched_event(bcs, B_XMTBUFREADY);
					}
				}
			}
		}
		if (val & 0x20) {	/* receive dframe */
			receive_dmsg(cs);
		}
		if (val & 0x04) {	/* dframe transmitted */
			if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
				del_timer(&cs->dbusytimer);
			if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
				sched_event_D_pci(cs, D_CLEARBUSY);
			if (cs->tx_skb) {
				if (cs->tx_skb->len) {
					/* D frame not complete yet: keep filling */
					if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
						hfcpci_fill_dfifo(cs);
						test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
					} else {
						debugl1(cs, "hfcpci_fill_dfifo irq blocked");
					}
					goto afterXPR;
				} else {
					dev_kfree_skb_irq(cs->tx_skb);
					cs->tx_cnt = 0;
					cs->tx_skb = NULL;
				}
			}
			if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
				cs->tx_cnt = 0;
				if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
					hfcpci_fill_dfifo(cs);
					test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
				} else {
					debugl1(cs, "hfcpci_fill_dfifo irq blocked");
				}
			} else
				sched_event_D_pci(cs, D_XMTBUFREADY);
		}
	afterXPR:
		/* replay events that were stashed while the fifo lock was
		 * held, but at most 15 times to guarantee termination */
		if (cs->hw.hfcpci.int_s1 && count--) {
			val = cs->hw.hfcpci.int_s1;
			cs->hw.hfcpci.int_s1 = 0;
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
		} else
			val = 0;
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
1092
/********************************************************************/
/* timer callback for D-chan busy resolution. Currently no function */
/********************************************************************/
/*
 * Intentionally empty: the FLG_DBUSY_TIMER timer is armed elsewhere
 * but no recovery action is implemented for this chip.
 */
static void
hfcpci_dbusy_timer(struct IsdnCardState *cs)
{
}
1100
/*************************************/
/* Layer 1 D-channel hardware access */
/*************************************/
/*
 * L2->L1 entry point for the D channel. 'pr' selects the primitive;
 * 'arg' is an sk_buff for data primitives, a loop selector (cast to
 * long) for HW_TESTLOOP, and unused otherwise. Register accesses are
 * serialized with cs->lock; fifo filling additionally takes the
 * FLG_LOCK_ATOMIC flag shared with the interrupt handler.
 */
static void
HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
{
	u_long flags;
	struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
	struct sk_buff *skb = arg;

	switch (pr) {
	case (PH_DATA | REQUEST):
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 0);
		spin_lock_irqsave(&cs->lock, flags);
		if (cs->tx_skb) {
			/* transmitter busy: queue behind current frame */
			skb_queue_tail(&cs->sq, skb);
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA Queued", 0);
#endif
		} else {
			/* idle: make this the current frame and start it */
			cs->tx_skb = skb;
			cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA", 0);
#endif
			if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
				hfcpci_fill_dfifo(cs);
				test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
			} else
				debugl1(cs, "hfcpci_fill_dfifo blocked");

		}
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (PH_PULL | INDICATION):
		spin_lock_irqsave(&cs->lock, flags);
		if (cs->tx_skb) {
			/* should not happen after a PULL handshake */
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
			skb_queue_tail(&cs->sq, skb);
			spin_unlock_irqrestore(&cs->lock, flags);
			break;
		}
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 0);
		cs->tx_skb = skb;
		cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
		if (cs->debug & L1_DEB_LAPD)
			Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
#endif
		if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			hfcpci_fill_dfifo(cs);
			test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
		} else
			debugl1(cs, "hfcpci_fill_dfifo blocked");
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (PH_PULL | REQUEST):
#ifdef L2FRAME_DEBUG		/* psa */
		if (cs->debug & L1_DEB_LAPD)
			debugl1(cs, "-> PH_REQUEST_PULL");
#endif
		if (!cs->tx_skb) {
			/* transmitter free: confirm pull right away */
			test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
		} else
			/* remember request; confirmed when tx finishes */
			test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
		break;
	case (HW_RESET | REQUEST):
		spin_lock_irqsave(&cs->lock, flags);
		/* force state machine to F3, then re-activate */
		Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);	/* HFC ST 3 */
		udelay(6);
		Write_hfc(cs, HFCPCI_STATES, 3);	/* release LOAD_STATE, value stays 3 */
		cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
		Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
		Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
		spin_unlock_irqrestore(&cs->lock, flags);
		l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
		break;
	case (HW_ENABLE | REQUEST):
		spin_lock_irqsave(&cs->lock, flags);
		Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (HW_DEACTIVATE | REQUEST):
		spin_lock_irqsave(&cs->lock, flags);
		cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
		Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (HW_INFO3 | REQUEST):
		spin_lock_irqsave(&cs->lock, flags);
		cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
		Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case (HW_TESTLOOP | REQUEST):
		/* route a B-channel back onto itself via PCM slots
		 * for loopback testing; arg selects the channel */
		spin_lock_irqsave(&cs->lock, flags);
		switch ((long) arg) {
		case (1):
			Write_hfc(cs, HFCPCI_B1_SSL, 0x80);	/* tx slot */
			Write_hfc(cs, HFCPCI_B1_RSL, 0x80);	/* rx slot */
			cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
			Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
			break;

		case (2):
			Write_hfc(cs, HFCPCI_B2_SSL, 0x81);	/* tx slot */
			Write_hfc(cs, HFCPCI_B2_RSL, 0x81);	/* rx slot */
			cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
			Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
			break;

		default:
			spin_unlock_irqrestore(&cs->lock, flags);
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg);
			return;
		}
		cs->hw.hfcpci.trm |= 0x80;	/* enable IOM-loop */
		Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	default:
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
		break;
	}
}
1238
/***********************************************/
/* called during init setting l1 stack pointer */
/***********************************************/
/* Install HFCPCI_l1hw as the D-channel L1 hardware hook of this stack. */
static void
setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
{
	st->l1.l1hw = HFCPCI_l1hw;
}
1247
1248 /**************************************/
1249 /* send B-channel data if not blocked */
1250 /**************************************/
1251 static void
1252 hfcpci_send_data(struct BCState *bcs)
1253 {
1254 struct IsdnCardState *cs = bcs->cs;
1255
1256 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1257 hfcpci_fill_fifo(bcs);
1258 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1259 } else
1260 debugl1(cs, "send_data %d blocked", bcs->channel);
1261 }
1262
/***************************************************************/
/* activate/deactivate hardware for selected channels and mode */
/***************************************************************/
/*
 * Program B-channel 'bc' of this card into 'mode' (L1_MODE_NULL /
 * TRANS / HDLC / EXTRN). All changes are accumulated in the shadow
 * registers (sctrl, sctrl_r, sctrl_e, fifo_en, int_m1, ctmt, conn)
 * and flushed to the chip in one burst at the end.
 * Caller must hold cs->lock.
 */
static void
mode_hfcpci(struct BCState *bcs, int mode, int bc)
{
	struct IsdnCardState *cs = bcs->cs;
	int fifo2;	/* fifo actually used; may differ from bc when swapped */

	if (cs->debug & L1_DEB_HSCX)
		debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
			mode, bc, bcs->channel);
	bcs->mode = mode;
	bcs->channel = bc;
	fifo2 = bc;
	if (cs->chanlimit > 1) {
		cs->hw.hfcpci.bswapped = 0;	/* B1 and B2 normal mode */
		cs->hw.hfcpci.sctrl_e &= ~0x80;
	} else {
		/* single-channel card: an active request for B2 is served
		 * from the B1 fifo with the chip's channels exchanged */
		if (bc) {
			if (mode != L1_MODE_NULL) {
				cs->hw.hfcpci.bswapped = 1;	/* B1 and B2 exchanged */
				cs->hw.hfcpci.sctrl_e |= 0x80;
			} else {
				cs->hw.hfcpci.bswapped = 0;	/* B1 and B2 normal mode */
				cs->hw.hfcpci.sctrl_e &= ~0x80;
			}
			fifo2 = 0;
		} else {
			cs->hw.hfcpci.bswapped = 0;	/* B1 and B2 normal mode */
			cs->hw.hfcpci.sctrl_e &= ~0x80;
		}
	}
	switch (mode) {
	case (L1_MODE_NULL):
		/* disable the channel and mask its interrupts */
		if (bc) {
			cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
			cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
		} else {
			cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
			cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
		}
		if (fifo2) {
			cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
			cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
		} else {
			cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
			cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
		}
		break;
	case (L1_MODE_TRANS):
		/* transparent mode: ctmt bit set selects transparent */
		hfcpci_clear_fifo_rx(cs, fifo2);
		hfcpci_clear_fifo_tx(cs, fifo2);
		if (bc) {
			cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
			cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
		} else {
			cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
			cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
		}
		if (fifo2) {
			cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
			cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
			cs->hw.hfcpci.ctmt |= 2;
			cs->hw.hfcpci.conn &= ~0x18;
		} else {
			cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
			cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
			cs->hw.hfcpci.ctmt |= 1;
			cs->hw.hfcpci.conn &= ~0x03;
		}
		break;
	case (L1_MODE_HDLC):
		/* HDLC mode: ctmt bit cleared selects HDLC framing */
		hfcpci_clear_fifo_rx(cs, fifo2);
		hfcpci_clear_fifo_tx(cs, fifo2);
		if (bc) {
			cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
			cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
		} else {
			cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
			cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
		}
		if (fifo2) {
			cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
			cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
			cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
			cs->hw.hfcpci.ctmt &= ~2;
			cs->hw.hfcpci.conn &= ~0x18;
		} else {
			cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
			cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
			cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
			cs->hw.hfcpci.ctmt &= ~1;
			cs->hw.hfcpci.conn &= ~0x03;
		}
		break;
	case (L1_MODE_EXTRN):
		/* channel routed externally: line enabled, fifo and
		 * interrupts disabled */
		if (bc) {
			cs->hw.hfcpci.conn |= 0x10;
			cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
			cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
			cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
			cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
		} else {
			cs->hw.hfcpci.conn |= 0x02;
			cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
			cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
			cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
			cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
		}
		break;
	}
	/* flush all shadow registers to the chip */
	Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
	Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
	Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
	Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
	Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
	Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
	Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
}
1383
1384 /******************************/
1385 /* Layer2 -> Layer 1 Transfer */
1386 /******************************/
1387 static void
1388 hfcpci_l2l1(struct PStack *st, int pr, void *arg)
1389 {
1390 struct BCState *bcs = st->l1.bcs;
1391 u_long flags;
1392 struct sk_buff *skb = arg;
1393
1394 switch (pr) {
1395 case (PH_DATA | REQUEST):
1396 spin_lock_irqsave(&bcs->cs->lock, flags);
1397 if (bcs->tx_skb) {
1398 skb_queue_tail(&bcs->squeue, skb);
1399 } else {
1400 bcs->tx_skb = skb;
1401 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1402 bcs->cs->BC_Send_Data(bcs);
1403 }
1404 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1405 break;
1406 case (PH_PULL | INDICATION):
1407 spin_lock_irqsave(&bcs->cs->lock, flags);
1408 if (bcs->tx_skb) {
1409 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1410 printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
1411 break;
1412 }
1413 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1414 bcs->tx_skb = skb;
1415 bcs->cs->BC_Send_Data(bcs);
1416 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1417 break;
1418 case (PH_PULL | REQUEST):
1419 if (!bcs->tx_skb) {
1420 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1421 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1422 } else
1423 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1424 break;
1425 case (PH_ACTIVATE | REQUEST):
1426 spin_lock_irqsave(&bcs->cs->lock, flags);
1427 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
1428 mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
1429 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1430 l1_msg_b(st, pr, arg);
1431 break;
1432 case (PH_DEACTIVATE | REQUEST):
1433 l1_msg_b(st, pr, arg);
1434 break;
1435 case (PH_DEACTIVATE | CONFIRM):
1436 spin_lock_irqsave(&bcs->cs->lock, flags);
1437 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
1438 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1439 mode_hfcpci(bcs, 0, st->l1.bc);
1440 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1441 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
1442 break;
1443 }
1444 }
1445
1446 /******************************************/
1447 /* deactivate B-channel access and queues */
1448 /******************************************/
1449 static void
1450 close_hfcpci(struct BCState *bcs)
1451 {
1452 mode_hfcpci(bcs, 0, bcs->channel);
1453 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
1454 skb_queue_purge(&bcs->rqueue);
1455 skb_queue_purge(&bcs->squeue);
1456 if (bcs->tx_skb) {
1457 dev_kfree_skb_any(bcs->tx_skb);
1458 bcs->tx_skb = NULL;
1459 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1460 }
1461 }
1462 }
1463
1464 /*************************************/
1465 /* init B-channel queues and control */
1466 /*************************************/
1467 static int
1468 open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
1469 {
1470 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
1471 skb_queue_head_init(&bcs->rqueue);
1472 skb_queue_head_init(&bcs->squeue);
1473 }
1474 bcs->tx_skb = NULL;
1475 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1476 bcs->event = 0;
1477 bcs->tx_cnt = 0;
1478 return (0);
1479 }
1480
1481 /*********************************/
1482 /* inits the stack for B-channel */
1483 /*********************************/
1484 static int
1485 setstack_2b(struct PStack *st, struct BCState *bcs)
1486 {
1487 bcs->channel = st->l1.bc;
1488 if (open_hfcpcistate(st->l1.hardware, bcs))
1489 return (-1);
1490 st->l1.bcs = bcs;
1491 st->l2.l2l1 = hfcpci_l2l1;
1492 setstack_manager(st);
1493 bcs->st = st;
1494 setstack_l1_B(st);
1495 return (0);
1496 }
1497
/***************************/
/* handle L1 state changes */
/***************************/
/*
 * Workqueue bottom half scheduled from the IRQ handler. Translates
 * chip ph_state changes into L1 messages (TE mode) or drives the NT
 * activation state machine directly (NT mode), then dispatches any
 * pending D-channel receive/transmit events.
 */
static void
hfcpci_bh(struct work_struct *work)
{
	struct IsdnCardState *cs =
		container_of(work, struct IsdnCardState, tqueue);
	u_long flags;
//      struct PStack *stptr;

	if (!cs)
		return;
	if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
		if (!cs->hw.hfcpci.nt_mode)
			/* TE mode: map chip F-states to L1 indications */
			switch (cs->dc.hfcpci.ph_state) {
			case (0):
				l1_msg(cs, HW_RESET | INDICATION, NULL);
				break;
			case (3):
				l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
				break;
			case (8):
				l1_msg(cs, HW_RSYNC | INDICATION, NULL);
				break;
			case (6):
				l1_msg(cs, HW_INFO2 | INDICATION, NULL);
				break;
			case (7):
				l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
				break;
			default:
				break;
			} else {
			/* NT mode: handle the G2 -> G3 timeout directly */
			spin_lock_irqsave(&cs->lock, flags);
			switch (cs->dc.hfcpci.ph_state) {
			case (2):
				if (cs->hw.hfcpci.nt_timer < 0) {
					/* T1 expired: force state G4 */
					cs->hw.hfcpci.nt_timer = 0;
					cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
					Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
					/* Clear already pending ints */
					/* dummy read acknowledges INT_S1; the
					 * empty statement is intentional */
					if (Read_hfc(cs, HFCPCI_INT_S1));
					Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
					udelay(10);
					Write_hfc(cs, HFCPCI_STATES, 4);
					cs->dc.hfcpci.ph_state = 4;
				} else {
					/* start T1 supervision via the chip's
					 * 3.125ms auto timer */
					cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
					Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
					cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
					cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
					Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
					Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
					cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
					Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);	/* allow G2 -> G3 transition */
				}
				break;
			case (1):
			case (3):
			case (4):
				/* stable state: stop T1 supervision */
				cs->hw.hfcpci.nt_timer = 0;
				cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
				Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
				break;
			default:
				break;
			}
			spin_unlock_irqrestore(&cs->lock, flags);
		}
	}
	if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
		DChannel_proc_rcv(cs);
	if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
		DChannel_proc_xmt(cs);
}
1574
1575
1576 /********************************/
1577 /* called for card init message */
1578 /********************************/
1579 static void
1580 inithfcpci(struct IsdnCardState *cs)
1581 {
1582 cs->bcs[0].BC_SetStack = setstack_2b;
1583 cs->bcs[1].BC_SetStack = setstack_2b;
1584 cs->bcs[0].BC_Close = close_hfcpci;
1585 cs->bcs[1].BC_Close = close_hfcpci;
1586 cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
1587 cs->dbusytimer.data = (long) cs;
1588 init_timer(&cs->dbusytimer);
1589 mode_hfcpci(cs->bcs, 0, 0);
1590 mode_hfcpci(cs->bcs + 1, 0, 1);
1591 }
1592
1593
1594
1595 /*******************************************/
1596 /* handle card messages from control layer */
1597 /*******************************************/
1598 static int
1599 hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1600 {
1601 u_long flags;
1602
1603 if (cs->debug & L1_DEB_ISAC)
1604 debugl1(cs, "HFCPCI: card_msg %x", mt);
1605 switch (mt) {
1606 case CARD_RESET:
1607 spin_lock_irqsave(&cs->lock, flags);
1608 reset_hfcpci(cs);
1609 spin_unlock_irqrestore(&cs->lock, flags);
1610 return (0);
1611 case CARD_RELEASE:
1612 release_io_hfcpci(cs);
1613 return (0);
1614 case CARD_INIT:
1615 spin_lock_irqsave(&cs->lock, flags);
1616 inithfcpci(cs);
1617 reset_hfcpci(cs);
1618 spin_unlock_irqrestore(&cs->lock, flags);
1619 msleep(80); /* Timeout 80ms */
1620 /* now switch timer interrupt off */
1621 spin_lock_irqsave(&cs->lock, flags);
1622 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1623 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1624 /* reinit mode reg */
1625 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1626 spin_unlock_irqrestore(&cs->lock, flags);
1627 return (0);
1628 case CARD_TEST:
1629 return (0);
1630 }
1631 return (0);
1632 }
1633
1634
/* this variable is used as card index when more than one card is present;
 * it remembers the last PCI device found so the next probe continues
 * the pci_find_device() scan after it */
static struct pci_dev *dev_hfcpci __devinitdata = NULL;
1637
/*
 * Probe and set up one HFC-PCI card. Locates the next matching PCI
 * device from id_list, allocates the 32K DMA fifo area, maps the
 * register window and installs all driver callbacks.
 * Returns 1 on success, 0 on any failure (convention of HiSax setup
 * functions, see the return paths below).
 */
int __devinit
setup_hfcpci(struct IsdnCard *card)
{
	u_long flags;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];
	int i;
	struct pci_dev *tmp_hfcpci = NULL;

#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif

	strcpy(tmp, hfcpci_revision);
	printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));

	cs->hw.hfcpci.int_s1 = 0;
	cs->dc.hfcpci.ph_state = 0;
	cs->hw.hfcpci.fifo = 255;
	if (cs->typ != ISDN_CTYPE_HFC_PCI)
		return(0);

	/* scan id_list, continuing after the card found by a previous
	 * call (dev_hfcpci) so multiple cards can be set up in turn */
	i = 0;
	while (id_list[i].vendor_id) {
		tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
					     id_list[i].device_id,
					     dev_hfcpci);
		i++;
		if (tmp_hfcpci) {
			/* fifo DMA area must be below 4GB and 32K aligned */
			dma_addr_t dma_mask = DMA_BIT_MASK(32) & ~0x7fffUL;
			if (pci_enable_device(tmp_hfcpci))
				continue;
			if (pci_set_dma_mask(tmp_hfcpci, dma_mask)) {
				printk(KERN_WARNING
				       "HiSax hfc_pci: No suitable DMA available.\n");
				continue;
			}
			if (pci_set_consistent_dma_mask(tmp_hfcpci, dma_mask)) {
				printk(KERN_WARNING
				       "HiSax hfc_pci: No suitable consistent DMA available.\n");
				continue;
			}
			pci_set_master(tmp_hfcpci);
			/* card->para[0], if given, pins the probe to a
			 * specific I/O base address */
			if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
				continue;
			else
				break;
		}
	}

	if (!tmp_hfcpci) {
		printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
		return (0);
	}

	i--;	/* back to the id_list entry that matched */
	dev_hfcpci = tmp_hfcpci;	/* old device */
	cs->hw.hfcpci.dev = dev_hfcpci;
	cs->irq = dev_hfcpci->irq;
	if (!cs->irq) {
		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
		return (0);
	}
	/* BAR 1 is the memory-mapped register window; mapped below */
	cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
	printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);

	if (!cs->hw.hfcpci.pci_io) {
		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
		return (0);
	}

	/* Allocate memory for FIFOS */
	cs->hw.hfcpci.fifos = pci_alloc_consistent(cs->hw.hfcpci.dev,
						   0x8000, &cs->hw.hfcpci.dma);
	if (!cs->hw.hfcpci.fifos) {
		printk(KERN_WARNING "HFC-PCI: Error allocating FIFO memory!\n");
		return 0;
	}
	/* the chip addresses the fifo area with the low 15 bits fixed,
	 * so the DMA base must sit on a 32K boundary */
	if (cs->hw.hfcpci.dma & 0x7fff) {
		printk(KERN_WARNING
		       "HFC-PCI: Error DMA memory not on 32K boundary (%lx)\n",
		       (u_long)cs->hw.hfcpci.dma);
		pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
				    cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
		return 0;
	}
	/* PCI config offset 0x80 takes the fifo DMA base address —
	 * presumably the chip's MWBA register; see the HFC-PCI datasheet */
	pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma);
	cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
	printk(KERN_INFO
	       "HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n",
	       cs->hw.hfcpci.pci_io,
	       cs->hw.hfcpci.fifos,
	       (u_long)cs->hw.hfcpci.dma,
	       cs->irq, HZ);

	spin_lock_irqsave(&cs->lock, flags);

	pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO);	/* enable memory mapped ports, disable busmaster */
	cs->hw.hfcpci.int_m2 = 0;	/* disable all interrupts */
	cs->hw.hfcpci.int_m1 = 0;
	Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
	Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
	/* At this point the needed PCI config is done */
	/* fifos are still not enabled */

	/* install all driver callbacks; this chip has no ISAC/HSCX,
	 * so those register accessors stay NULL */
	INIT_WORK(&cs->tqueue,  hfcpci_bh);
	cs->setstack_d = setstack_hfcpci;
	cs->BC_Send_Data = &hfcpci_send_data;
	cs->readisac = NULL;
	cs->writeisac = NULL;
	cs->readisacfifo = NULL;
	cs->writeisacfifo = NULL;
	cs->BC_Read_Reg = NULL;
	cs->BC_Write_Reg = NULL;
	cs->irq_func = &hfcpci_interrupt;
	cs->irq_flags |= IRQF_SHARED;	/* PCI interrupt lines may be shared */
	cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
	cs->hw.hfcpci.timer.data = (long) cs;
	init_timer(&cs->hw.hfcpci.timer);
	cs->cardmsg = &hfcpci_card_msg;
	cs->auxcmd = &hfcpci_auxcmd;

	spin_unlock_irqrestore(&cs->lock, flags);

	return (1);
}
This page took 0.090103 seconds and 5 git commands to generate.