[SERIAL] Remove wrong asm/serial.h inclusions
1 /*
2 * linux/drivers/char/synclink.c
3 *
4 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
26 * mgsl_flush_chars or mgsl_write is called.
27 *
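 * As an illustration only, a minimal user-space sketch of that contract,
 * assuming 'fd' is an already opened and configured SyncLink tty and
 * build_frame() is a hypothetical helper:
 *
 *	unsigned char frame[1024];
 *	int len = build_frame(frame);		// one complete HDLC frame
 *	if (write(fd, frame, len) != len)	// one write == one frame
 *		perror("frame not queued");
 *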
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (too small to hold largest
30 * frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56 #if defined(__i386__)
57 # define BREAKPOINT() asm(" int $3");
58 #else
59 # define BREAKPOINT() { }
60 #endif
61
62 #define MAX_ISA_DEVICES 10
63 #define MAX_PCI_DEVICES 10
64 #define MAX_TOTAL_DEVICES 20
65
66 #include <linux/config.h>
67 #include <linux/module.h>
68 #include <linux/errno.h>
69 #include <linux/signal.h>
70 #include <linux/sched.h>
71 #include <linux/timer.h>
72 #include <linux/interrupt.h>
73 #include <linux/pci.h>
74 #include <linux/tty.h>
75 #include <linux/tty_flip.h>
76 #include <linux/serial.h>
77 #include <linux/major.h>
78 #include <linux/string.h>
79 #include <linux/fcntl.h>
80 #include <linux/ptrace.h>
81 #include <linux/ioport.h>
82 #include <linux/mm.h>
83 #include <linux/slab.h>
84 #include <linux/delay.h>
85
86 #include <linux/netdevice.h>
87
88 #include <linux/vmalloc.h>
89 #include <linux/init.h>
90
91 #include <linux/delay.h>
92 #include <linux/ioctl.h>
93
94 #include <asm/system.h>
95 #include <asm/io.h>
96 #include <asm/irq.h>
97 #include <asm/dma.h>
98 #include <linux/bitops.h>
99 #include <asm/types.h>
100 #include <linux/termios.h>
101 #include <linux/workqueue.h>
102 #include <linux/hdlc.h>
103 #include <linux/dma-mapping.h>
104
105 #ifdef CONFIG_HDLC_MODULE
106 #define CONFIG_HDLC 1
107 #endif
108
109 #define GET_USER(error,value,addr) error = get_user(value,addr)
110 #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
111 #define PUT_USER(error,value,addr) error = put_user(value,addr)
112 #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
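
/* Illustrative sketch of how the wrappers above are meant to be used: each
 * expands to an assignment of 0 or -EFAULT to the error variable, so a
 * caller (e.g. an ioctl helper) can simply test and return it:
 *
 *	int err;
 *	MGSL_PARAMS tmp_params;
 *	COPY_FROM_USER( err, &tmp_params, user_params, sizeof(MGSL_PARAMS) );
 *	if (err)
 *		return err;
 */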
113
114 #include <asm/uaccess.h>
115
116 #include "linux/synclink.h"
117
118 #define RCLRVALUE 0xffff
119
120 static MGSL_PARAMS default_params = {
121 MGSL_MODE_HDLC, /* unsigned long mode */
122 0, /* unsigned char loopback; */
123 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
124 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
125 0, /* unsigned long clock_speed; */
126 0xff, /* unsigned char addr_filter; */
127 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
128 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
129 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
130 9600, /* unsigned long data_rate; */
131 8, /* unsigned char data_bits; */
132 1, /* unsigned char stop_bits; */
133 ASYNC_PARITY_NONE /* unsigned char parity; */
134 };
135
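/* A user-space configuration sketch (illustrative; assumes the
 * MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls declared in synclink.h) showing
 * how these defaults can be read back and adjusted:
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	// read current settings
 *	params.data_rate = 38400;		// example values only
 *	params.loopback = 1;
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);	// apply
 */
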
136 #define SHARED_MEM_ADDRESS_SIZE 0x40000
137 #define BUFFERLISTSIZE (PAGE_SIZE)
138 #define DMABUFFERSIZE (PAGE_SIZE)
139 #define MAXRXFRAMES 7
140
141 typedef struct _DMABUFFERENTRY
142 {
143 u32 phys_addr; /* 32-bit flat physical address of data buffer */
144 volatile u16 count; /* buffer size/data count */
145 volatile u16 status; /* Control/status field */
146 volatile u16 rcc; /* character count field */
147 u16 reserved; /* padding required by 16C32 */
148 u32 link; /* 32-bit flat link to next buffer entry */
149 char *virt_addr; /* virtual address of data buffer */
150 u32 phys_entry; /* physical address of this buffer entry */
151 dma_addr_t dma_addr;
152 } DMABUFFERENTRY, *DMAPBUFFERENTRY;
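
/* Chaining sketch (an assumption for illustration; the actual list setup is
 * done in mgsl_alloc_buffer_list_memory(), declared further below): each
 * entry's 'link' field holds the physical address of the next entry so the
 * 16C32 DMA controller can walk the list without CPU intervention:
 *
 *	list[i].link = list[(i + 1) % count].phys_entry;	// assumed wrap-around
 */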
153
154 /* The queue of BH actions to be performed */
155
156 #define BH_RECEIVE 1
157 #define BH_TRANSMIT 2
158 #define BH_STATUS 4
159
160 #define IO_PIN_SHUTDOWN_LIMIT 100
161
162 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
163
164 struct _input_signal_events {
165 int ri_up;
166 int ri_down;
167 int dsr_up;
168 int dsr_down;
169 int dcd_up;
170 int dcd_down;
171 int cts_up;
172 int cts_down;
173 };
174
175 /* transmit holding buffer definitions */
176 #define MAX_TX_HOLDING_BUFFERS 5
177 struct tx_holding_buffer {
178 int buffer_size;
179 unsigned char * buffer;
180 };
181
182
183 /*
184 * Device instance data structure
185 */
186
187 struct mgsl_struct {
188 int magic;
189 int flags;
190 int count; /* count of opens */
191 int line;
192 int hw_version;
193 unsigned short close_delay;
194 unsigned short closing_wait; /* time to wait before closing */
195
196 struct mgsl_icount icount;
197
198 struct tty_struct *tty;
199 int timeout;
200 int x_char; /* xon/xoff character */
201 int blocked_open; /* # of blocked opens */
202 u16 read_status_mask;
203 u16 ignore_status_mask;
204 unsigned char *xmit_buf;
205 int xmit_head;
206 int xmit_tail;
207 int xmit_cnt;
208
209 wait_queue_head_t open_wait;
210 wait_queue_head_t close_wait;
211
212 wait_queue_head_t status_event_wait_q;
213 wait_queue_head_t event_wait_q;
214 struct timer_list tx_timer; /* HDLC transmit timeout timer */
215 struct mgsl_struct *next_device; /* device list link */
216
217 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
218 struct work_struct task; /* task structure for scheduling bh */
219
220 u32 EventMask; /* event trigger mask */
221 u32 RecordedEvents; /* pending events */
222
223 u32 max_frame_size; /* as set by device config */
224
225 u32 pending_bh;
226
227 int bh_running; /* Protection from multiple */
228 int isr_overflow;
229 int bh_requested;
230
231 int dcd_chkcount; /* check counts to prevent */
232 int cts_chkcount; /* too many IRQs if a signal */
233 int dsr_chkcount; /* is floating */
234 int ri_chkcount;
235
236 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
237 u32 buffer_list_phys;
238 dma_addr_t buffer_list_dma_addr;
239
240 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
241 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
242 unsigned int current_rx_buffer;
243
244 int num_tx_dma_buffers; /* number of tx dma frames required */
245 int tx_dma_buffers_used;
246 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
247 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
248 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
249 int current_tx_buffer; /* next tx dma buffer to be loaded */
250
251 unsigned char *intermediate_rxbuffer;
252
253 int num_tx_holding_buffers; /* number of tx holding buffers allocated */
254 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
255 int put_tx_holding_index; /* next tx holding buffer to store user request */
256 int tx_holding_count; /* number of tx holding buffers waiting */
257 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
258
259 int rx_enabled;
260 int rx_overflow;
261 int rx_rcc_underrun;
262
263 int tx_enabled;
264 int tx_active;
265 u32 idle_mode;
266
267 u16 cmr_value;
268 u16 tcsr_value;
269
270 char device_name[25]; /* device instance name */
271
272 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
273 unsigned char bus; /* expansion bus number (zero based) */
274 unsigned char function; /* PCI device number */
275
276 unsigned int io_base; /* base I/O address of adapter */
277 unsigned int io_addr_size; /* size of the I/O address range */
278 int io_addr_requested; /* nonzero if I/O address requested */
279
280 unsigned int irq_level; /* interrupt level */
281 unsigned long irq_flags;
282 int irq_requested; /* nonzero if IRQ requested */
283
284 unsigned int dma_level; /* DMA channel */
285 int dma_requested; /* nonzero if dma channel requested */
286
287 u16 mbre_bit;
288 u16 loopback_bits;
289 u16 usc_idle_mode;
290
291 MGSL_PARAMS params; /* communications parameters */
292
293 unsigned char serial_signals; /* current serial signal states */
294
295 int irq_occurred; /* for diagnostics use */
296 unsigned int init_error; /* Initialization startup error (DIAGS) */
297 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
298
299 u32 last_mem_alloc;
300 unsigned char* memory_base; /* shared memory address (PCI only) */
301 u32 phys_memory_base;
302 int shared_mem_requested;
303
304 unsigned char* lcr_base; /* local config registers (PCI only) */
305 u32 phys_lcr_base;
306 u32 lcr_offset;
307 int lcr_mem_requested;
308
309 u32 misc_ctrl_value;
310 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
311 char char_buf[MAX_ASYNC_BUFFER_SIZE];
312 BOOLEAN drop_rts_on_tx_done;
313
314 BOOLEAN loopmode_insert_requested;
315 BOOLEAN loopmode_send_done_requested;
316
317 struct _input_signal_events input_signal_events;
318
319 /* generic HDLC device parts */
320 int netcount;
321 int dosyncppp;
322 spinlock_t netlock;
323
324 #ifdef CONFIG_HDLC
325 struct net_device *netdev;
326 #endif
327 };
328
329 #define MGSL_MAGIC 0x5401
330
331 /*
332 * The size of the serial xmit buffer is 1 page, or 4096 bytes
333 */
334 #ifndef SERIAL_XMIT_SIZE
335 #define SERIAL_XMIT_SIZE 4096
336 #endif
337
338 /*
339 * These macros define the offsets used in calculating the
340 * I/O address of the specified USC registers.
341 */
342
343
344 #define DCPIN 2 /* Bit 1 of I/O address */
345 #define SDPIN 4 /* Bit 2 of I/O address */
346
347 #define DCAR 0 /* DMA command/address register */
348 #define CCAR SDPIN /* channel command/address register */
349 #define DATAREG DCPIN + SDPIN /* serial data register */
350 #define MSBONLY 0x41
351 #define LSBONLY 0x40
352
353 /*
354 * These macros define the register address (ordinal number)
355 * used for writing address/value pairs to the USC.
356 */
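
/* Access-pattern sketch (see mgsl_isr_receive_data() further below for a
 * concrete instance): the register ordinal is first written to the CCAR
 * port, then data is transferred through that same port. For example,
 * reading one byte from the RxFIFO:
 *
 *	outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
 *	      info->io_base + CCAR );
 *	DataByte = inb( info->io_base + CCAR );
 */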
357
358 #define CMR 0x02 /* Channel mode Register */
359 #define CCSR 0x04 /* Channel Command/status Register */
360 #define CCR 0x06 /* Channel Control Register */
361 #define PSR 0x08 /* Port status Register */
362 #define PCR 0x0a /* Port Control Register */
363 #define TMDR 0x0c /* Test mode Data Register */
364 #define TMCR 0x0e /* Test mode Control Register */
365 #define CMCR 0x10 /* Clock mode Control Register */
366 #define HCR 0x12 /* Hardware Configuration Register */
367 #define IVR 0x14 /* Interrupt Vector Register */
368 #define IOCR 0x16 /* Input/Output Control Register */
369 #define ICR 0x18 /* Interrupt Control Register */
370 #define DCCR 0x1a /* Daisy Chain Control Register */
371 #define MISR 0x1c /* Misc Interrupt status Register */
372 #define SICR 0x1e /* status Interrupt Control Register */
373 #define RDR 0x20 /* Receive Data Register */
374 #define RMR 0x22 /* Receive mode Register */
375 #define RCSR 0x24 /* Receive Command/status Register */
376 #define RICR 0x26 /* Receive Interrupt Control Register */
377 #define RSR 0x28 /* Receive Sync Register */
378 #define RCLR 0x2a /* Receive count Limit Register */
379 #define RCCR 0x2c /* Receive Character count Register */
380 #define TC0R 0x2e /* Time Constant 0 Register */
381 #define TDR 0x30 /* Transmit Data Register */
382 #define TMR 0x32 /* Transmit mode Register */
383 #define TCSR 0x34 /* Transmit Command/status Register */
384 #define TICR 0x36 /* Transmit Interrupt Control Register */
385 #define TSR 0x38 /* Transmit Sync Register */
386 #define TCLR 0x3a /* Transmit count Limit Register */
387 #define TCCR 0x3c /* Transmit Character count Register */
388 #define TC1R 0x3e /* Time Constant 1 Register */
389
390
391 /*
392 * MACRO DEFINITIONS FOR DMA REGISTERS
393 */
394
395 #define DCR 0x06 /* DMA Control Register (shared) */
396 #define DACR 0x08 /* DMA Array count Register (shared) */
397 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
398 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
399 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
400 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
401 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
402
403 #define TDMR 0x02 /* Transmit DMA mode Register */
404 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
405 #define TBCR 0x2a /* Transmit Byte count Register */
406 #define TARL 0x2c /* Transmit Address Register (low) */
407 #define TARU 0x2e /* Transmit Address Register (high) */
408 #define NTBCR 0x3a /* Next Transmit Byte count Register */
409 #define NTARL 0x3c /* Next Transmit Address Register (low) */
410 #define NTARU 0x3e /* Next Transmit Address Register (high) */
411
412 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
413 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
414 #define RBCR 0xaa /* Receive Byte count Register */
415 #define RARL 0xac /* Receive Address Register (low) */
416 #define RARU 0xae /* Receive Address Register (high) */
417 #define NRBCR 0xba /* Next Receive Byte count Register */
418 #define NRARL 0xbc /* Next Receive Address Register (low) */
419 #define NRARU 0xbe /* Next Receive Address Register (high) */
420
421
422 /*
423 * MACRO DEFINITIONS FOR MODEM STATUS BITS
424 */
425
426 #define MODEMSTATUS_DTR 0x80
427 #define MODEMSTATUS_DSR 0x40
428 #define MODEMSTATUS_RTS 0x20
429 #define MODEMSTATUS_CTS 0x10
430 #define MODEMSTATUS_RI 0x04
431 #define MODEMSTATUS_DCD 0x01
432
433
434 /*
435 * Channel Command/Address Register (CCAR) Command Codes
436 */
437
438 #define RTCmd_Null 0x0000
439 #define RTCmd_ResetHighestIus 0x1000
440 #define RTCmd_TriggerChannelLoadDma 0x2000
441 #define RTCmd_TriggerRxDma 0x2800
442 #define RTCmd_TriggerTxDma 0x3000
443 #define RTCmd_TriggerRxAndTxDma 0x3800
444 #define RTCmd_PurgeRxFifo 0x4800
445 #define RTCmd_PurgeTxFifo 0x5000
446 #define RTCmd_PurgeRxAndTxFifo 0x5800
447 #define RTCmd_LoadRcc 0x6800
448 #define RTCmd_LoadTcc 0x7000
449 #define RTCmd_LoadRccAndTcc 0x7800
450 #define RTCmd_LoadTC0 0x8800
451 #define RTCmd_LoadTC1 0x9000
452 #define RTCmd_LoadTC0AndTC1 0x9800
453 #define RTCmd_SerialDataLSBFirst 0xa000
454 #define RTCmd_SerialDataMSBFirst 0xa800
455 #define RTCmd_SelectBigEndian 0xb000
456 #define RTCmd_SelectLittleEndian 0xb800
457
458
459 /*
460 * DMA Command/Address Register (DCAR) Command Codes
461 */
462
463 #define DmaCmd_Null 0x0000
464 #define DmaCmd_ResetTxChannel 0x1000
465 #define DmaCmd_ResetRxChannel 0x1200
466 #define DmaCmd_StartTxChannel 0x2000
467 #define DmaCmd_StartRxChannel 0x2200
468 #define DmaCmd_ContinueTxChannel 0x3000
469 #define DmaCmd_ContinueRxChannel 0x3200
470 #define DmaCmd_PauseTxChannel 0x4000
471 #define DmaCmd_PauseRxChannel 0x4200
472 #define DmaCmd_AbortTxChannel 0x5000
473 #define DmaCmd_AbortRxChannel 0x5200
474 #define DmaCmd_InitTxChannel 0x7000
475 #define DmaCmd_InitRxChannel 0x7200
476 #define DmaCmd_ResetHighestDmaIus 0x8000
477 #define DmaCmd_ResetAllChannels 0x9000
478 #define DmaCmd_StartAllChannels 0xa000
479 #define DmaCmd_ContinueAllChannels 0xb000
480 #define DmaCmd_PauseAllChannels 0xc000
481 #define DmaCmd_AbortAllChannels 0xd000
482 #define DmaCmd_InitAllChannels 0xf000
483
484 #define TCmd_Null 0x0000
485 #define TCmd_ClearTxCRC 0x2000
486 #define TCmd_SelectTicrTtsaData 0x4000
487 #define TCmd_SelectTicrTxFifostatus 0x5000
488 #define TCmd_SelectTicrIntLevel 0x6000
489 #define TCmd_SelectTicrdma_level 0x7000
490 #define TCmd_SendFrame 0x8000
491 #define TCmd_SendAbort 0x9000
492 #define TCmd_EnableDleInsertion 0xc000
493 #define TCmd_DisableDleInsertion 0xd000
494 #define TCmd_ClearEofEom 0xe000
495 #define TCmd_SetEofEom 0xf000
496
497 #define RCmd_Null 0x0000
498 #define RCmd_ClearRxCRC 0x2000
499 #define RCmd_EnterHuntmode 0x3000
500 #define RCmd_SelectRicrRtsaData 0x4000
501 #define RCmd_SelectRicrRxFifostatus 0x5000
502 #define RCmd_SelectRicrIntLevel 0x6000
503 #define RCmd_SelectRicrdma_level 0x7000
504
505 /*
506 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
507 */
508
509 #define RECEIVE_STATUS BIT5
510 #define RECEIVE_DATA BIT4
511 #define TRANSMIT_STATUS BIT3
512 #define TRANSMIT_DATA BIT2
513 #define IO_PIN BIT1
514 #define MISC BIT0
515
516
517 /*
518 * Receive status Bits in Receive Command/status Register RCSR
519 */
520
521 #define RXSTATUS_SHORT_FRAME BIT8
522 #define RXSTATUS_CODE_VIOLATION BIT8
523 #define RXSTATUS_EXITED_HUNT BIT7
524 #define RXSTATUS_IDLE_RECEIVED BIT6
525 #define RXSTATUS_BREAK_RECEIVED BIT5
526 #define RXSTATUS_ABORT_RECEIVED BIT5
527 #define RXSTATUS_RXBOUND BIT4
528 #define RXSTATUS_CRC_ERROR BIT3
529 #define RXSTATUS_FRAMING_ERROR BIT3
530 #define RXSTATUS_ABORT BIT2
531 #define RXSTATUS_PARITY_ERROR BIT2
532 #define RXSTATUS_OVERRUN BIT1
533 #define RXSTATUS_DATA_AVAILABLE BIT0
534 #define RXSTATUS_ALL 0x01f6
535 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
536
537 /*
538 * Values for setting transmit idle mode in
539 * Transmit Control/status Register (TCSR)
540 */
541 #define IDLEMODE_FLAGS 0x0000
542 #define IDLEMODE_ALT_ONE_ZERO 0x0100
543 #define IDLEMODE_ZERO 0x0200
544 #define IDLEMODE_ONE 0x0300
545 #define IDLEMODE_ALT_MARK_SPACE 0x0500
546 #define IDLEMODE_SPACE 0x0600
547 #define IDLEMODE_MARK 0x0700
548 #define IDLEMODE_MASK 0x0700
549
550 /*
551 * IUSC revision identifiers
552 */
553 #define IUSC_SL1660 0x4d44
554 #define IUSC_PRE_SL1660 0x4553
555
556 /*
557 * Transmit status Bits in Transmit Command/status Register (TCSR)
558 */
559
560 #define TCSR_PRESERVE 0x0F00
561
562 #define TCSR_UNDERWAIT BIT11
563 #define TXSTATUS_PREAMBLE_SENT BIT7
564 #define TXSTATUS_IDLE_SENT BIT6
565 #define TXSTATUS_ABORT_SENT BIT5
566 #define TXSTATUS_EOF_SENT BIT4
567 #define TXSTATUS_EOM_SENT BIT4
568 #define TXSTATUS_CRC_SENT BIT3
569 #define TXSTATUS_ALL_SENT BIT2
570 #define TXSTATUS_UNDERRUN BIT1
571 #define TXSTATUS_FIFO_EMPTY BIT0
572 #define TXSTATUS_ALL 0x00fa
573 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
574
575
576 #define MISCSTATUS_RXC_LATCHED BIT15
577 #define MISCSTATUS_RXC BIT14
578 #define MISCSTATUS_TXC_LATCHED BIT13
579 #define MISCSTATUS_TXC BIT12
580 #define MISCSTATUS_RI_LATCHED BIT11
581 #define MISCSTATUS_RI BIT10
582 #define MISCSTATUS_DSR_LATCHED BIT9
583 #define MISCSTATUS_DSR BIT8
584 #define MISCSTATUS_DCD_LATCHED BIT7
585 #define MISCSTATUS_DCD BIT6
586 #define MISCSTATUS_CTS_LATCHED BIT5
587 #define MISCSTATUS_CTS BIT4
588 #define MISCSTATUS_RCC_UNDERRUN BIT3
589 #define MISCSTATUS_DPLL_NO_SYNC BIT2
590 #define MISCSTATUS_BRG1_ZERO BIT1
591 #define MISCSTATUS_BRG0_ZERO BIT0
592
593 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
594 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
595
596 #define SICR_RXC_ACTIVE BIT15
597 #define SICR_RXC_INACTIVE BIT14
598 #define SICR_RXC (BIT15+BIT14)
599 #define SICR_TXC_ACTIVE BIT13
600 #define SICR_TXC_INACTIVE BIT12
601 #define SICR_TXC (BIT13+BIT12)
602 #define SICR_RI_ACTIVE BIT11
603 #define SICR_RI_INACTIVE BIT10
604 #define SICR_RI (BIT11+BIT10)
605 #define SICR_DSR_ACTIVE BIT9
606 #define SICR_DSR_INACTIVE BIT8
607 #define SICR_DSR (BIT9+BIT8)
608 #define SICR_DCD_ACTIVE BIT7
609 #define SICR_DCD_INACTIVE BIT6
610 #define SICR_DCD (BIT7+BIT6)
611 #define SICR_CTS_ACTIVE BIT5
612 #define SICR_CTS_INACTIVE BIT4
613 #define SICR_CTS (BIT5+BIT4)
614 #define SICR_RCC_UNDERFLOW BIT3
615 #define SICR_DPLL_NO_SYNC BIT2
616 #define SICR_BRG1_ZERO BIT1
617 #define SICR_BRG0_ZERO BIT0
618
619 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
620 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
621 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
622 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
623 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
624
625 #define usc_EnableInterrupts( a, b ) \
626 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
627
628 #define usc_DisableInterrupts( a, b ) \
629 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
630
631 #define usc_EnableMasterIrqBit(a) \
632 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
633
634 #define usc_DisableMasterIrqBit(a) \
635 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
636
637 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
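
/* Typical usage (mirrors mgsl_isr_receive_status() further below):
 * acknowledging and unlatching a receive status interrupt:
 *
 *	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
 *	usc_UnlatchRxstatusBits( info, status );
 */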
638
639 /*
640 * Transmit status Bits in Transmit Control status Register (TCSR)
641 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
642 */
643
644 #define TXSTATUS_PREAMBLE_SENT BIT7
645 #define TXSTATUS_IDLE_SENT BIT6
646 #define TXSTATUS_ABORT_SENT BIT5
647 #define TXSTATUS_EOF BIT4
648 #define TXSTATUS_CRC_SENT BIT3
649 #define TXSTATUS_ALL_SENT BIT2
650 #define TXSTATUS_UNDERRUN BIT1
651 #define TXSTATUS_FIFO_EMPTY BIT0
652
653 #define DICR_MASTER BIT15
654 #define DICR_TRANSMIT BIT0
655 #define DICR_RECEIVE BIT1
656
657 #define usc_EnableDmaInterrupts(a,b) \
658 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
659
660 #define usc_DisableDmaInterrupts(a,b) \
661 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
662
663 #define usc_EnableStatusIrqs(a,b) \
664 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
665
666 #define usc_DisablestatusIrqs(a,b) \
667 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
668
669 /* Transmit status Bits in Transmit Control status Register (TCSR) */
670 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
671
672
673 #define DISABLE_UNCONDITIONAL 0
674 #define DISABLE_END_OF_FRAME 1
675 #define ENABLE_UNCONDITIONAL 2
676 #define ENABLE_AUTO_CTS 3
677 #define ENABLE_AUTO_DCD 3
678 #define usc_EnableTransmitter(a,b) \
679 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
680 #define usc_EnableReceiver(a,b) \
681 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
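
/* Example usage (mirrors mgsl_isr_misc() further below): unconditionally
 * disabling the receiver before resetting its DMA channel:
 *
 *	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
 *	usc_DmaCmd(info, DmaCmd_ResetRxChannel);
 */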
682
683 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
684 static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
685 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
686
687 static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
688 static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
689 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
690 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
691 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
692
693 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
694 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
695
696 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
697
698 static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
699 static void usc_start_receiver( struct mgsl_struct *info );
700 static void usc_stop_receiver( struct mgsl_struct *info );
701
702 static void usc_start_transmitter( struct mgsl_struct *info );
703 static void usc_stop_transmitter( struct mgsl_struct *info );
704 static void usc_set_txidle( struct mgsl_struct *info );
705 static void usc_load_txfifo( struct mgsl_struct *info );
706
707 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
708 static void usc_enable_loopback( struct mgsl_struct *info, int enable );
709
710 static void usc_get_serial_signals( struct mgsl_struct *info );
711 static void usc_set_serial_signals( struct mgsl_struct *info );
712
713 static void usc_reset( struct mgsl_struct *info );
714
715 static void usc_set_sync_mode( struct mgsl_struct *info );
716 static void usc_set_sdlc_mode( struct mgsl_struct *info );
717 static void usc_set_async_mode( struct mgsl_struct *info );
718 static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
719
720 static void usc_loopback_frame( struct mgsl_struct *info );
721
722 static void mgsl_tx_timeout(unsigned long context);
723
724
725 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
726 static void usc_loopmode_insert_request( struct mgsl_struct * info );
727 static int usc_loopmode_active( struct mgsl_struct * info);
728 static void usc_loopmode_send_done( struct mgsl_struct * info );
729
730 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
731
732 #ifdef CONFIG_HDLC
733 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
734 static void hdlcdev_tx_done(struct mgsl_struct *info);
735 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
736 static int hdlcdev_init(struct mgsl_struct *info);
737 static void hdlcdev_exit(struct mgsl_struct *info);
738 #endif
739
740 /*
741 * Defines a BUS descriptor value for the PCI adapter
742 * local bus address ranges.
743 */
744
745 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
746 (0x00400020 + \
747 ((WrHold) << 30) + \
748 ((WrDly) << 28) + \
749 ((RdDly) << 26) + \
750 ((Nwdd) << 20) + \
751 ((Nwad) << 15) + \
752 ((Nxda) << 13) + \
753 ((Nrdd) << 11) + \
754 ((Nrad) << 6) )
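
/* Worked example (arithmetic only, derived from the macro above):
 * BUS_DESCRIPTOR( 1, 0, 0, 0, 0, 0, 0, 0 ) evaluates to
 * 0x00400020 + (1 << 30) = 0x40400020, i.e. only the WrHold field is set
 * on top of the constant base value.
 */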
755
756 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
757
758 /*
759 * Adapter diagnostic routines
760 */
761 static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
762 static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
763 static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
764 static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
765 static int mgsl_adapter_test( struct mgsl_struct *info );
766
767 /*
768 * device and resource management routines
769 */
770 static int mgsl_claim_resources(struct mgsl_struct *info);
771 static void mgsl_release_resources(struct mgsl_struct *info);
772 static void mgsl_add_device(struct mgsl_struct *info);
773 static struct mgsl_struct* mgsl_allocate_device(void);
774
775 /*
776 * DMA buffer manipulation functions.
777 */
778 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
779 static int mgsl_get_rx_frame( struct mgsl_struct *info );
780 static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
781 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
782 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
783 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
784 static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
785 static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
786
787 /*
788 * DMA and Shared Memory buffer allocation and formatting
789 */
790 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
791 static void mgsl_free_dma_buffers(struct mgsl_struct *info);
792 static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
793 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
794 static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
795 static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
796 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
797 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
798 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
799 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
800 static int load_next_tx_holding_buffer(struct mgsl_struct *info);
801 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
802
803 /*
804 * Bottom half interrupt handlers
805 */
806 static void mgsl_bh_handler(void* Context);
807 static void mgsl_bh_receive(struct mgsl_struct *info);
808 static void mgsl_bh_transmit(struct mgsl_struct *info);
809 static void mgsl_bh_status(struct mgsl_struct *info);
810
811 /*
812 * Interrupt handler routines and dispatch table.
813 */
814 static void mgsl_isr_null( struct mgsl_struct *info );
815 static void mgsl_isr_transmit_data( struct mgsl_struct *info );
816 static void mgsl_isr_receive_data( struct mgsl_struct *info );
817 static void mgsl_isr_receive_status( struct mgsl_struct *info );
818 static void mgsl_isr_transmit_status( struct mgsl_struct *info );
819 static void mgsl_isr_io_pin( struct mgsl_struct *info );
820 static void mgsl_isr_misc( struct mgsl_struct *info );
821 static void mgsl_isr_receive_dma( struct mgsl_struct *info );
822 static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
823
824 typedef void (*isr_dispatch_func)(struct mgsl_struct *);
825
826 static isr_dispatch_func UscIsrTable[7] =
827 {
828 mgsl_isr_null,
829 mgsl_isr_misc,
830 mgsl_isr_io_pin,
831 mgsl_isr_transmit_data,
832 mgsl_isr_transmit_status,
833 mgsl_isr_receive_data,
834 mgsl_isr_receive_status
835 };
836
837 /*
838 * ioctl call handlers
839 */
840 static int tiocmget(struct tty_struct *tty, struct file *file);
841 static int tiocmset(struct tty_struct *tty, struct file *file,
842 unsigned int set, unsigned int clear);
843 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
844 __user *user_icount);
845 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
846 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
847 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
848 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
849 static int mgsl_txenable(struct mgsl_struct * info, int enable);
850 static int mgsl_txabort(struct mgsl_struct * info);
851 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
852 static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
853 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
854
855 /* set non-zero on successful registration with PCI subsystem */
856 static int pci_registered;
857
858 /*
859 * Global linked list of SyncLink devices
860 */
861 static struct mgsl_struct *mgsl_device_list;
862 static int mgsl_device_count;
863
864 /*
865 * Set this param to non-zero to load eax with the
866 * .text section address and breakpoint on module load.
867 * This is useful for use with gdb and add-symbol-file command.
868 */
869 static int break_on_load;
870
871 /*
872 * Driver major number, defaults to zero to get auto
873 * assigned major number. May be forced as module parameter.
874 */
875 static int ttymajor;
876
877 /*
878 * Array of user specified options for ISA adapters.
879 */
880 static int io[MAX_ISA_DEVICES];
881 static int irq[MAX_ISA_DEVICES];
882 static int dma[MAX_ISA_DEVICES];
883 static int debug_level;
884 static int maxframe[MAX_TOTAL_DEVICES];
885 static int dosyncppp[MAX_TOTAL_DEVICES];
886 static int txdmabufs[MAX_TOTAL_DEVICES];
887 static int txholdbufs[MAX_TOTAL_DEVICES];
888
889 module_param(break_on_load, bool, 0);
890 module_param(ttymajor, int, 0);
891 module_param_array(io, int, NULL, 0);
892 module_param_array(irq, int, NULL, 0);
893 module_param_array(dma, int, NULL, 0);
894 module_param(debug_level, int, 0);
895 module_param_array(maxframe, int, NULL, 0);
896 module_param_array(dosyncppp, int, NULL, 0);
897 module_param_array(txdmabufs, int, NULL, 0);
898 module_param_array(txholdbufs, int, NULL, 0);
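
/* Illustrative module load using the parameters registered above (all
 * values are examples only):
 *
 *	modprobe synclink io=0x280,0x300 irq=10,11 dma=7,5 \
 *		maxframe=4096 dosyncppp=1 debug_level=1
 */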
899
900 static char *driver_name = "SyncLink serial driver";
901 static char *driver_version = "$Revision: 4.38 $";
902
903 static int synclink_init_one (struct pci_dev *dev,
904 const struct pci_device_id *ent);
905 static void synclink_remove_one (struct pci_dev *dev);
906
907 static struct pci_device_id synclink_pci_tbl[] = {
908 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
909 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
910 { 0, }, /* terminate list */
911 };
912 MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
913
914 MODULE_LICENSE("GPL");
915
916 static struct pci_driver synclink_pci_driver = {
917 .name = "synclink",
918 .id_table = synclink_pci_tbl,
919 .probe = synclink_init_one,
920 .remove = __devexit_p(synclink_remove_one),
921 };
922
923 static struct tty_driver *serial_driver;
924
925 /* number of characters left in xmit buffer before we ask for more */
926 #define WAKEUP_CHARS 256
927
928
929 static void mgsl_change_params(struct mgsl_struct *info);
930 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
931
932 /*
933 * 1st function defined in .text section. Calling this function in
934 * init_module() followed by a breakpoint allows a remote debugger
935 * (gdb) to get the .text address for the add-symbol-file command.
936 * This allows remote debugging of dynamically loadable modules.
937 */
938 static void* mgsl_get_text_ptr(void)
939 {
940 return mgsl_get_text_ptr;
941 }
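
/* Debugging sketch (illustrative): with break_on_load set, execution stops
 * in init_module() and the address returned by mgsl_get_text_ptr() can be
 * given to gdb so module symbols resolve, e.g.
 *
 *	(gdb) add-symbol-file synclink.ko <.text address>
 */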
942
943 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
944 char *name, const char *routine)
945 {
946 #ifdef MGSL_PARANOIA_CHECK
947 static const char *badmagic =
948 "Warning: bad magic number for mgsl struct (%s) in %s\n";
949 static const char *badinfo =
950 "Warning: null mgsl_struct for (%s) in %s\n";
951
952 if (!info) {
953 printk(badinfo, name, routine);
954 return 1;
955 }
956 if (info->magic != MGSL_MAGIC) {
957 printk(badmagic, name, routine);
958 return 1;
959 }
960 #else
961 if (!info)
962 return 1;
963 #endif
964 return 0;
965 }
966
967 /**
968 * line discipline callback wrappers
969 *
970 * The wrappers maintain line discipline references
971 * while calling into the line discipline.
972 *
973 * ldisc_receive_buf - pass receive data to line discipline
974 */
975
976 static void ldisc_receive_buf(struct tty_struct *tty,
977 const __u8 *data, char *flags, int count)
978 {
979 struct tty_ldisc *ld;
980 if (!tty)
981 return;
982 ld = tty_ldisc_ref(tty);
983 if (ld) {
984 if (ld->receive_buf)
985 ld->receive_buf(tty, data, flags, count);
986 tty_ldisc_deref(ld);
987 }
988 }
989
990 /* mgsl_stop() throttle (stop) transmitter
991 *
992 * Arguments: tty pointer to tty info structure
993 * Return Value: None
994 */
995 static void mgsl_stop(struct tty_struct *tty)
996 {
997 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
998 unsigned long flags;
999
1000 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
1001 return;
1002
1003 if ( debug_level >= DEBUG_LEVEL_INFO )
1004 printk("mgsl_stop(%s)\n",info->device_name);
1005
1006 spin_lock_irqsave(&info->irq_spinlock,flags);
1007 if (info->tx_enabled)
1008 usc_stop_transmitter(info);
1009 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1010
1011 } /* end of mgsl_stop() */
1012
1013 /* mgsl_start() release (start) transmitter
1014 *
1015 * Arguments: tty pointer to tty info structure
1016 * Return Value: None
1017 */
1018 static void mgsl_start(struct tty_struct *tty)
1019 {
1020 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1021 unsigned long flags;
1022
1023 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1024 return;
1025
1026 if ( debug_level >= DEBUG_LEVEL_INFO )
1027 printk("mgsl_start(%s)\n",info->device_name);
1028
1029 spin_lock_irqsave(&info->irq_spinlock,flags);
1030 if (!info->tx_enabled)
1031 usc_start_transmitter(info);
1032 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1033
1034 } /* end of mgsl_start() */
1035
1036 /*
1037 * Bottom half work queue access functions
1038 */
1039
1040 /* mgsl_bh_action() Return next bottom half action to perform.
1041 * Return Value: BH action code or 0 if nothing to do.
1042 */
1043 static int mgsl_bh_action(struct mgsl_struct *info)
1044 {
1045 unsigned long flags;
1046 int rc = 0;
1047
1048 spin_lock_irqsave(&info->irq_spinlock,flags);
1049
1050 if (info->pending_bh & BH_RECEIVE) {
1051 info->pending_bh &= ~BH_RECEIVE;
1052 rc = BH_RECEIVE;
1053 } else if (info->pending_bh & BH_TRANSMIT) {
1054 info->pending_bh &= ~BH_TRANSMIT;
1055 rc = BH_TRANSMIT;
1056 } else if (info->pending_bh & BH_STATUS) {
1057 info->pending_bh &= ~BH_STATUS;
1058 rc = BH_STATUS;
1059 }
1060
1061 if (!rc) {
1062 /* Mark BH routine as complete */
1063 info->bh_running = 0;
1064 info->bh_requested = 0;
1065 }
1066
1067 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1068
1069 return rc;
1070 }
1071
1072 /*
1073 * Perform bottom half processing of work items queued by ISR.
1074 */
1075 static void mgsl_bh_handler(void* Context)
1076 {
1077 struct mgsl_struct *info = (struct mgsl_struct*)Context;
1078 int action;
1079
1080 if (!info)
1081 return;
1082
1083 if ( debug_level >= DEBUG_LEVEL_BH )
1084 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1085 __FILE__,__LINE__,info->device_name);
1086
1087 info->bh_running = 1;
1088
1089 while((action = mgsl_bh_action(info)) != 0) {
1090
1091 /* Process work item */
1092 if ( debug_level >= DEBUG_LEVEL_BH )
1093 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1094 __FILE__,__LINE__,action);
1095
1096 switch (action) {
1097
1098 case BH_RECEIVE:
1099 mgsl_bh_receive(info);
1100 break;
1101 case BH_TRANSMIT:
1102 mgsl_bh_transmit(info);
1103 break;
1104 case BH_STATUS:
1105 mgsl_bh_status(info);
1106 break;
1107 default:
1108 /* unknown work item ID */
1109 printk("Unknown work item ID=%08X!\n", action);
1110 break;
1111 }
1112 }
1113
1114 if ( debug_level >= DEBUG_LEVEL_BH )
1115 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1116 __FILE__,__LINE__,info->device_name);
1117 }
1118
1119 static void mgsl_bh_receive(struct mgsl_struct *info)
1120 {
1121 int (*get_rx_frame)(struct mgsl_struct *info) =
1122 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1123
1124 if ( debug_level >= DEBUG_LEVEL_BH )
1125 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1126 __FILE__,__LINE__,info->device_name);
1127
1128 do
1129 {
1130 if (info->rx_rcc_underrun) {
1131 unsigned long flags;
1132 spin_lock_irqsave(&info->irq_spinlock,flags);
1133 usc_start_receiver(info);
1134 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1135 return;
1136 }
1137 } while(get_rx_frame(info));
1138 }
1139
1140 static void mgsl_bh_transmit(struct mgsl_struct *info)
1141 {
1142 struct tty_struct *tty = info->tty;
1143 unsigned long flags;
1144
1145 if ( debug_level >= DEBUG_LEVEL_BH )
1146 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1147 __FILE__,__LINE__,info->device_name);
1148
1149 if (tty) {
1150 tty_wakeup(tty);
1151 wake_up_interruptible(&tty->write_wait);
1152 }
1153
1154 /* if transmitter idle and loopmode_send_done_requested
1155 * then start echoing RxD to TxD
1156 */
1157 spin_lock_irqsave(&info->irq_spinlock,flags);
1158 if ( !info->tx_active && info->loopmode_send_done_requested )
1159 usc_loopmode_send_done( info );
1160 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1161 }
1162
1163 static void mgsl_bh_status(struct mgsl_struct *info)
1164 {
1165 if ( debug_level >= DEBUG_LEVEL_BH )
1166 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1167 __FILE__,__LINE__,info->device_name);
1168
1169 info->ri_chkcount = 0;
1170 info->dsr_chkcount = 0;
1171 info->dcd_chkcount = 0;
1172 info->cts_chkcount = 0;
1173 }
1174
1175 /* mgsl_isr_receive_status()
1176 *
1177 * Service a receive status interrupt. The type of status
1178 * interrupt is indicated by the state of the RCSR.
1179 * This is only used for HDLC mode.
1180 *
1181 * Arguments: info pointer to device instance data
1182 * Return Value: None
1183 */
1184 static void mgsl_isr_receive_status( struct mgsl_struct *info )
1185 {
1186 u16 status = usc_InReg( info, RCSR );
1187
1188 if ( debug_level >= DEBUG_LEVEL_ISR )
1189 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1190 __FILE__,__LINE__,status);
1191
1192 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1193 info->loopmode_insert_requested &&
1194 usc_loopmode_active(info) )
1195 {
1196 ++info->icount.rxabort;
1197 info->loopmode_insert_requested = FALSE;
1198
1199 /* clear CMR:13 to start echoing RxD to TxD */
1200 info->cmr_value &= ~BIT13;
1201 usc_OutReg(info, CMR, info->cmr_value);
1202
1203 /* disable received abort irq (no longer required) */
1204 usc_OutReg(info, RICR,
1205 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1206 }
1207
1208 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1209 if (status & RXSTATUS_EXITED_HUNT)
1210 info->icount.exithunt++;
1211 if (status & RXSTATUS_IDLE_RECEIVED)
1212 info->icount.rxidle++;
1213 wake_up_interruptible(&info->event_wait_q);
1214 }
1215
1216 if (status & RXSTATUS_OVERRUN){
1217 info->icount.rxover++;
1218 usc_process_rxoverrun_sync( info );
1219 }
1220
1221 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1222 usc_UnlatchRxstatusBits( info, status );
1223
1224 } /* end of mgsl_isr_receive_status() */
1225
1226 /* mgsl_isr_transmit_status()
1227 *
1228 * Service a transmit status interrupt
1229 * HDLC mode: end of transmit frame
1230 * Async mode: all data is sent
1231 * Transmit status is indicated by bits in the TCSR.
1232 *
1233 * Arguments: info pointer to device instance data
1234 * Return Value: None
1235 */
1236 static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1237 {
1238 u16 status = usc_InReg( info, TCSR );
1239
1240 if ( debug_level >= DEBUG_LEVEL_ISR )
1241 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1242 __FILE__,__LINE__,status);
1243
1244 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1245 usc_UnlatchTxstatusBits( info, status );
1246
1247 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1248 {
1249 /* finished sending HDLC abort. This may leave */
1250 /* the TxFifo with data from the aborted frame */
1251 /* so purge the TxFifo. Also shutdown the DMA */
1252 /* channel in case there is data remaining in */
1253 /* the DMA buffer */
1254 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1255 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1256 }
1257
1258 if ( status & TXSTATUS_EOF_SENT )
1259 info->icount.txok++;
1260 else if ( status & TXSTATUS_UNDERRUN )
1261 info->icount.txunder++;
1262 else if ( status & TXSTATUS_ABORT_SENT )
1263 info->icount.txabort++;
1264 else
1265 info->icount.txunder++;
1266
1267 info->tx_active = 0;
1268 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1269 del_timer(&info->tx_timer);
1270
1271 if ( info->drop_rts_on_tx_done ) {
1272 usc_get_serial_signals( info );
1273 if ( info->serial_signals & SerialSignal_RTS ) {
1274 info->serial_signals &= ~SerialSignal_RTS;
1275 usc_set_serial_signals( info );
1276 }
1277 info->drop_rts_on_tx_done = 0;
1278 }
1279
1280 #ifdef CONFIG_HDLC
1281 if (info->netcount)
1282 hdlcdev_tx_done(info);
1283 else
1284 #endif
1285 {
1286 if (info->tty->stopped || info->tty->hw_stopped) {
1287 usc_stop_transmitter(info);
1288 return;
1289 }
1290 info->pending_bh |= BH_TRANSMIT;
1291 }
1292
1293 } /* end of mgsl_isr_transmit_status() */
1294
1295 /* mgsl_isr_io_pin()
1296 *
1297 * Service an Input/Output pin interrupt. The type of
1298 * interrupt is indicated by bits in the MISR
1299 *
1300 * Arguments: info pointer to device instance data
1301 * Return Value: None
1302 */
1303 static void mgsl_isr_io_pin( struct mgsl_struct *info )
1304 {
1305 struct mgsl_icount *icount;
1306 u16 status = usc_InReg( info, MISR );
1307
1308 if ( debug_level >= DEBUG_LEVEL_ISR )
1309 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1310 __FILE__,__LINE__,status);
1311
1312 usc_ClearIrqPendingBits( info, IO_PIN );
1313 usc_UnlatchIostatusBits( info, status );
1314
1315 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1316 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1317 icount = &info->icount;
1318 /* update input line counters */
1319 if (status & MISCSTATUS_RI_LATCHED) {
1320 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1321 usc_DisablestatusIrqs(info,SICR_RI);
1322 icount->rng++;
1323 if ( status & MISCSTATUS_RI )
1324 info->input_signal_events.ri_up++;
1325 else
1326 info->input_signal_events.ri_down++;
1327 }
1328 if (status & MISCSTATUS_DSR_LATCHED) {
1329 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1330 usc_DisablestatusIrqs(info,SICR_DSR);
1331 icount->dsr++;
1332 if ( status & MISCSTATUS_DSR )
1333 info->input_signal_events.dsr_up++;
1334 else
1335 info->input_signal_events.dsr_down++;
1336 }
1337 if (status & MISCSTATUS_DCD_LATCHED) {
1338 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1339 usc_DisablestatusIrqs(info,SICR_DCD);
1340 icount->dcd++;
1341 if (status & MISCSTATUS_DCD) {
1342 info->input_signal_events.dcd_up++;
1343 } else
1344 info->input_signal_events.dcd_down++;
1345 #ifdef CONFIG_HDLC
1346 if (info->netcount) {
1347 if (status & MISCSTATUS_DCD)
1348 netif_carrier_on(info->netdev);
1349 else
1350 netif_carrier_off(info->netdev);
1351 }
1352 #endif
1353 }
1354 if (status & MISCSTATUS_CTS_LATCHED)
1355 {
1356 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1357 usc_DisablestatusIrqs(info,SICR_CTS);
1358 icount->cts++;
1359 if ( status & MISCSTATUS_CTS )
1360 info->input_signal_events.cts_up++;
1361 else
1362 info->input_signal_events.cts_down++;
1363 }
1364 wake_up_interruptible(&info->status_event_wait_q);
1365 wake_up_interruptible(&info->event_wait_q);
1366
1367 if ( (info->flags & ASYNC_CHECK_CD) &&
1368 (status & MISCSTATUS_DCD_LATCHED) ) {
1369 if ( debug_level >= DEBUG_LEVEL_ISR )
1370 printk("%s CD now %s...", info->device_name,
1371 (status & MISCSTATUS_DCD) ? "on" : "off");
1372 if (status & MISCSTATUS_DCD)
1373 wake_up_interruptible(&info->open_wait);
1374 else {
1375 if ( debug_level >= DEBUG_LEVEL_ISR )
1376 printk("doing serial hangup...");
1377 if (info->tty)
1378 tty_hangup(info->tty);
1379 }
1380 }
1381
1382 if ( (info->flags & ASYNC_CTS_FLOW) &&
1383 (status & MISCSTATUS_CTS_LATCHED) ) {
1384 if (info->tty->hw_stopped) {
1385 if (status & MISCSTATUS_CTS) {
1386 if ( debug_level >= DEBUG_LEVEL_ISR )
1387 printk("CTS tx start...");
1388 if (info->tty)
1389 info->tty->hw_stopped = 0;
1390 usc_start_transmitter(info);
1391 info->pending_bh |= BH_TRANSMIT;
1392 return;
1393 }
1394 } else {
1395 if (!(status & MISCSTATUS_CTS)) {
1396 if ( debug_level >= DEBUG_LEVEL_ISR )
1397 printk("CTS tx stop...");
1398 if (info->tty)
1399 info->tty->hw_stopped = 1;
1400 usc_stop_transmitter(info);
1401 }
1402 }
1403 }
1404 }
1405
1406 info->pending_bh |= BH_STATUS;
1407
1408 /* for diagnostics set IRQ flag */
1409 if ( status & MISCSTATUS_TXC_LATCHED ){
1410 usc_OutReg( info, SICR,
1411 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1412 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1413 info->irq_occurred = 1;
1414 }
1415
1416 } /* end of mgsl_isr_io_pin() */
1417
1418 /* mgsl_isr_transmit_data()
1419 *
1420 * Service a transmit data interrupt (async mode only).
1421 *
1422 * Arguments: info pointer to device instance data
1423 * Return Value: None
1424 */
1425 static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1426 {
1427 if ( debug_level >= DEBUG_LEVEL_ISR )
1428 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1429 __FILE__,__LINE__,info->xmit_cnt);
1430
1431 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1432
1433 if (info->tty->stopped || info->tty->hw_stopped) {
1434 usc_stop_transmitter(info);
1435 return;
1436 }
1437
1438 if ( info->xmit_cnt )
1439 usc_load_txfifo( info );
1440 else
1441 info->tx_active = 0;
1442
1443 if (info->xmit_cnt < WAKEUP_CHARS)
1444 info->pending_bh |= BH_TRANSMIT;
1445
1446 } /* end of mgsl_isr_transmit_data() */
1447
1448 /* mgsl_isr_receive_data()
1449 *
1450 * Service a receive data interrupt. This occurs
1451 * when operating in asynchronous interrupt transfer mode.
1452 * The receive data FIFO is flushed to the receive data buffers.
1453 *
1454 * Arguments: info pointer to device instance data
1455 * Return Value: None
1456 */
1457 static void mgsl_isr_receive_data( struct mgsl_struct *info )
1458 {
1459 int Fifocount;
1460 u16 status;
1461 int work = 0;
1462 unsigned char DataByte;
1463 struct tty_struct *tty = info->tty;
1464 struct mgsl_icount *icount = &info->icount;
1465
1466 if ( debug_level >= DEBUG_LEVEL_ISR )
1467 printk("%s(%d):mgsl_isr_receive_data\n",
1468 __FILE__,__LINE__);
1469
1470 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1471
1472 /* select FIFO status for RICR readback */
1473 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1474
1475 /* clear the Wordstatus bit so that status readback */
1476 /* only reflects the status of this byte */
1477 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1478
1479 /* flush the receive FIFO */
1480
1481 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1482 int flag;
1483
1484 /* read one byte from RxFIFO */
1485 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1486 info->io_base + CCAR );
1487 DataByte = inb( info->io_base + CCAR );
1488
1489 /* get the status of the received byte */
1490 status = usc_InReg(info, RCSR);
1491 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1492 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1493 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1494
1495 icount->rx++;
1496
1497 flag = 0;
1498 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1499 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1500 printk("rxerr=%04X\n",status);
1501 /* update error statistics */
1502 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1503 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1504 icount->brk++;
1505 } else if (status & RXSTATUS_PARITY_ERROR)
1506 icount->parity++;
1507 else if (status & RXSTATUS_FRAMING_ERROR)
1508 icount->frame++;
1509 else if (status & RXSTATUS_OVERRUN) {
1510 /* must issue purge fifo cmd before */
1511 /* 16C32 accepts more receive chars */
1512 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1513 icount->overrun++;
1514 }
1515
1516 /* discard char if tty control flags say so */
1517 if (status & info->ignore_status_mask)
1518 continue;
1519
1520 status &= info->read_status_mask;
1521
1522 if (status & RXSTATUS_BREAK_RECEIVED) {
1523 flag = TTY_BREAK;
1524 if (info->flags & ASYNC_SAK)
1525 do_SAK(tty);
1526 } else if (status & RXSTATUS_PARITY_ERROR)
1527 flag = TTY_PARITY;
1528 else if (status & RXSTATUS_FRAMING_ERROR)
1529 flag = TTY_FRAME;
1530 } /* end of if (error) */
1531 tty_insert_flip_char(tty, DataByte, flag);
1532 if (status & RXSTATUS_OVERRUN) {
1533 /* Overrun is special, since it's
1534 * reported immediately, and doesn't
1535 * affect the current character
1536 */
1537 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1538 }
1539 }
1540
1541 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1542 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1543 __FILE__,__LINE__,icount->rx,icount->brk,
1544 icount->parity,icount->frame,icount->overrun);
1545 }
1546
1547 if(work)
1548 tty_flip_buffer_push(tty);
1549 }
1550
1551 /* mgsl_isr_misc()
1552 *
1553 * Service a miscellaneous interrupt source.
1554 *
1555 * Arguments: info pointer to device extension (instance data)
1556 * Return Value: None
1557 */
1558 static void mgsl_isr_misc( struct mgsl_struct *info )
1559 {
1560 u16 status = usc_InReg( info, MISR );
1561
1562 if ( debug_level >= DEBUG_LEVEL_ISR )
1563 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1564 __FILE__,__LINE__,status);
1565
1566 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1567 (info->params.mode == MGSL_MODE_HDLC)) {
1568
1569 /* turn off receiver and rx DMA */
1570 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1571 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1572 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1573 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1574 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1575
1576 /* schedule BH handler to restart receiver */
1577 info->pending_bh |= BH_RECEIVE;
1578 info->rx_rcc_underrun = 1;
1579 }
1580
1581 usc_ClearIrqPendingBits( info, MISC );
1582 usc_UnlatchMiscstatusBits( info, status );
1583
1584 } /* end of mgsl_isr_misc() */
1585
1586 /* mgsl_isr_null()
1587 *
1588 * Services undefined interrupt vectors from the
1589 * USC. (hence this function SHOULD never be called)
1590 *
1591 * Arguments: info pointer to device extension (instance data)
1592 * Return Value: None
1593 */
1594 static void mgsl_isr_null( struct mgsl_struct *info )
1595 {
1596
1597 } /* end of mgsl_isr_null() */
1598
1599 /* mgsl_isr_receive_dma()
1600 *
1601 * Service a receive DMA channel interrupt.
1602 * For this driver there are two sources of receive DMA interrupts
1603 * as identified in the Receive DMA mode Register (RDMR):
1604 *
1605 * BIT3 EOA/EOL End of List, all receive buffers in receive
1606 * buffer list have been filled (no more free buffers
1607 * available). The DMA controller has shut down.
1608 *
1609 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1610 * DMA buffer is terminated in response to completion
1611 * of a good frame or a frame with errors. The status
1612 * of the frame is stored in the buffer entry in the
1613 * list of receive buffer entries.
1614 *
1615 * Arguments: info pointer to device instance data
1616 * Return Value: None
1617 */
1618 static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1619 {
1620 u16 status;
1621
1622 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1623 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1624
1625 /* Read the receive DMA status to identify interrupt type. */
1626 /* This also clears the status bits. */
1627 status = usc_InDmaReg( info, RDMR );
1628
1629 if ( debug_level >= DEBUG_LEVEL_ISR )
1630 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1631 __FILE__,__LINE__,info->device_name,status);
1632
1633 info->pending_bh |= BH_RECEIVE;
1634
1635 if ( status & BIT3 ) {
1636 info->rx_overflow = 1;
1637 info->icount.buf_overrun++;
1638 }
1639
1640 } /* end of mgsl_isr_receive_dma() */
1641
1642 /* mgsl_isr_transmit_dma()
1643 *
1644 * This function services a transmit DMA channel interrupt.
1645 *
1646 * For this driver there is one source of transmit DMA interrupts
1647 * as identified in the Transmit DMA Mode Register (TDMR):
1648 *
1649 * BIT2 EOB End of Buffer. This interrupt occurs when a
1650 * transmit DMA buffer has been emptied.
1651 *
1652 * The driver maintains enough transmit DMA buffers to hold at least
1653 * one max frame size transmit frame. When operating in a buffered
1654 * transmit mode, there may be enough transmit DMA buffers to hold at
1655 * least two or more max frame size frames. On an EOB condition,
1656 * determine if there are any queued transmit buffers and copy into
1657 * transmit DMA buffers if we have room.
1658 *
1659 * Arguments: info pointer to device instance data
1660 * Return Value: None
1661 */
1662 static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1663 {
1664 u16 status;
1665
1666 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1667 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1668
1669 /* Read the transmit DMA status to identify interrupt type. */
1670 /* This also clears the status bits. */
1671
1672 status = usc_InDmaReg( info, TDMR );
1673
1674 if ( debug_level >= DEBUG_LEVEL_ISR )
1675 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1676 __FILE__,__LINE__,info->device_name,status);
1677
1678 if ( status & BIT2 ) {
1679 --info->tx_dma_buffers_used;
1680
1681 /* if there are transmit frames queued,
1682 * try to load the next one
1683 */
1684 if ( load_next_tx_holding_buffer(info) ) {
1685 /* if call returns non-zero value, we have
1686 * at least one free tx holding buffer
1687 */
1688 info->pending_bh |= BH_TRANSMIT;
1689 }
1690 }
1691
1692 } /* end of mgsl_isr_transmit_dma() */
1693
1694 /* mgsl_interrupt()
1695 *
1696 * Interrupt service routine entry point.
1697 *
1698 * Arguments:
1699 *
1700 * irq interrupt number that caused interrupt
1701 * dev_id device ID supplied during interrupt registration
1702 * regs interrupted processor context
1703 *
1704  *	Return Value: IRQ_NONE if dev_id is invalid, otherwise IRQ_HANDLED
1705 */
1706 static irqreturn_t mgsl_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1707 {
1708 struct mgsl_struct * info;
1709 u16 UscVector;
1710 u16 DmaVector;
1711
1712 if ( debug_level >= DEBUG_LEVEL_ISR )
1713 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1714 __FILE__,__LINE__,irq);
1715
1716 info = (struct mgsl_struct *)dev_id;
1717 if (!info)
1718 return IRQ_NONE;
1719
1720 spin_lock(&info->irq_spinlock);
1721
1722 for(;;) {
1723 /* Read the interrupt vectors from hardware. */
1724 UscVector = usc_InReg(info, IVR) >> 9;
1725 DmaVector = usc_InDmaReg(info, DIVR);
1726
1727 if ( debug_level >= DEBUG_LEVEL_ISR )
1728 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1729 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1730
1731 if ( !UscVector && !DmaVector )
1732 break;
1733
1734 /* Dispatch interrupt vector */
1735 if ( UscVector )
1736 (*UscIsrTable[UscVector])(info);
1737 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1738 mgsl_isr_transmit_dma(info);
1739 else
1740 mgsl_isr_receive_dma(info);
1741
1742 if ( info->isr_overflow ) {
1743 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1744 __FILE__,__LINE__,info->device_name, irq);
1745 usc_DisableMasterIrqBit(info);
1746 usc_DisableDmaInterrupts(info,DICR_MASTER);
1747 break;
1748 }
1749 }
1750
1751 /* Request bottom half processing if there's something
1752 * for it to do and the bh is not already running
1753 */
1754
1755 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1756 if ( debug_level >= DEBUG_LEVEL_ISR )
1757 printk("%s(%d):%s queueing bh task.\n",
1758 __FILE__,__LINE__,info->device_name);
1759 schedule_work(&info->task);
1760 info->bh_requested = 1;
1761 }
1762
1763 spin_unlock(&info->irq_spinlock);
1764
1765 if ( debug_level >= DEBUG_LEVEL_ISR )
1766 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1767 __FILE__,__LINE__,irq);
1768 return IRQ_HANDLED;
1769 } /* end of mgsl_interrupt() */
1770
1771 /* startup()
1772 *
1773 * Initialize and start device.
1774 *
1775 * Arguments: info pointer to device instance data
1776 * Return Value: 0 if success, otherwise error code
1777 */
1778 static int startup(struct mgsl_struct * info)
1779 {
1780 int retval = 0;
1781
1782 if ( debug_level >= DEBUG_LEVEL_INFO )
1783 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1784
1785 if (info->flags & ASYNC_INITIALIZED)
1786 return 0;
1787
1788 if (!info->xmit_buf) {
1789 /* allocate a page of memory for a transmit buffer */
1790 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1791 if (!info->xmit_buf) {
1792 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1793 __FILE__,__LINE__,info->device_name);
1794 return -ENOMEM;
1795 }
1796 }
1797
1798 info->pending_bh = 0;
1799
1800 memset(&info->icount, 0, sizeof(info->icount));
1801
1802 init_timer(&info->tx_timer);
1803 info->tx_timer.data = (unsigned long)info;
1804 info->tx_timer.function = mgsl_tx_timeout;
1805
1806 /* Allocate and claim adapter resources */
1807 retval = mgsl_claim_resources(info);
1808
1809 /* perform existence check and diagnostics */
1810 if ( !retval )
1811 retval = mgsl_adapter_test(info);
1812
1813 if ( retval ) {
1814 if (capable(CAP_SYS_ADMIN) && info->tty)
1815 set_bit(TTY_IO_ERROR, &info->tty->flags);
1816 mgsl_release_resources(info);
1817 return retval;
1818 }
1819
1820 /* program hardware for current parameters */
1821 mgsl_change_params(info);
1822
1823 if (info->tty)
1824 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1825
1826 info->flags |= ASYNC_INITIALIZED;
1827
1828 return 0;
1829
1830 } /* end of startup() */
1831
1832 /* shutdown()
1833 *
1834 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1835 *
1836 * Arguments: info pointer to device instance data
1837 * Return Value: None
1838 */
1839 static void shutdown(struct mgsl_struct * info)
1840 {
1841 unsigned long flags;
1842
1843 if (!(info->flags & ASYNC_INITIALIZED))
1844 return;
1845
1846 if (debug_level >= DEBUG_LEVEL_INFO)
1847 printk("%s(%d):mgsl_shutdown(%s)\n",
1848 __FILE__,__LINE__, info->device_name );
1849
1850 /* clear status wait queue because status changes */
1851 /* can't happen after shutting down the hardware */
1852 wake_up_interruptible(&info->status_event_wait_q);
1853 wake_up_interruptible(&info->event_wait_q);
1854
1855 del_timer(&info->tx_timer);
1856
1857 if (info->xmit_buf) {
1858 free_page((unsigned long) info->xmit_buf);
1859 info->xmit_buf = NULL;
1860 }
1861
1862 spin_lock_irqsave(&info->irq_spinlock,flags);
1863 usc_DisableMasterIrqBit(info);
1864 usc_stop_receiver(info);
1865 usc_stop_transmitter(info);
1866 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1867 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1868 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1869
1870 /* Disable DMAEN (Port 7, Bit 14) */
1871 /* This disconnects the DMA request signal from the ISA bus */
1872 /* on the ISA adapter. This has no effect for the PCI adapter */
1873 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1874
1875 /* Disable INTEN (Port 6, Bit12) */
1876 /* This disconnects the IRQ request signal to the ISA bus */
1877 /* on the ISA adapter. This has no effect for the PCI adapter */
1878 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1879
1880 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1881 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1882 usc_set_serial_signals(info);
1883 }
1884
1885 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1886
1887 mgsl_release_resources(info);
1888
1889 if (info->tty)
1890 set_bit(TTY_IO_ERROR, &info->tty->flags);
1891
1892 info->flags &= ~ASYNC_INITIALIZED;
1893
1894 } /* end of shutdown() */
1895
1896 static void mgsl_program_hw(struct mgsl_struct *info)
1897 {
1898 unsigned long flags;
1899
1900 spin_lock_irqsave(&info->irq_spinlock,flags);
1901
1902 usc_stop_receiver(info);
1903 usc_stop_transmitter(info);
1904 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1905
1906 if (info->params.mode == MGSL_MODE_HDLC ||
1907 info->params.mode == MGSL_MODE_RAW ||
1908 info->netcount)
1909 usc_set_sync_mode(info);
1910 else
1911 usc_set_async_mode(info);
1912
1913 usc_set_serial_signals(info);
1914
1915 info->dcd_chkcount = 0;
1916 info->cts_chkcount = 0;
1917 info->ri_chkcount = 0;
1918 info->dsr_chkcount = 0;
1919
1920 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1921 usc_EnableInterrupts(info, IO_PIN);
1922 usc_get_serial_signals(info);
1923
1924 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1925 usc_start_receiver(info);
1926
1927 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1928 }
1929
1930 /* Reconfigure adapter based on new parameters
1931 */
1932 static void mgsl_change_params(struct mgsl_struct *info)
1933 {
1934 unsigned cflag;
1935 int bits_per_char;
1936
1937 if (!info->tty || !info->tty->termios)
1938 return;
1939
1940 if (debug_level >= DEBUG_LEVEL_INFO)
1941 printk("%s(%d):mgsl_change_params(%s)\n",
1942 __FILE__,__LINE__, info->device_name );
1943
1944 cflag = info->tty->termios->c_cflag;
1945
1946 /* if B0 rate (hangup) specified then negate DTR and RTS */
1947 /* otherwise assert DTR and RTS */
1948 if (cflag & CBAUD)
1949 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1950 else
1951 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1952
1953 /* byte size and parity */
1954
1955 switch (cflag & CSIZE) {
1956 case CS5: info->params.data_bits = 5; break;
1957 case CS6: info->params.data_bits = 6; break;
1958 case CS7: info->params.data_bits = 7; break;
1959 case CS8: info->params.data_bits = 8; break;
1960 /* Never happens, but GCC is too dumb to figure it out */
1961 default: info->params.data_bits = 7; break;
1962 }
1963
1964 if (cflag & CSTOPB)
1965 info->params.stop_bits = 2;
1966 else
1967 info->params.stop_bits = 1;
1968
1969 info->params.parity = ASYNC_PARITY_NONE;
1970 if (cflag & PARENB) {
1971 if (cflag & PARODD)
1972 info->params.parity = ASYNC_PARITY_ODD;
1973 else
1974 info->params.parity = ASYNC_PARITY_EVEN;
1975 #ifdef CMSPAR
1976 if (cflag & CMSPAR)
1977 info->params.parity = ASYNC_PARITY_SPACE;
1978 #endif
1979 }
1980
1981 /* calculate number of jiffies to transmit a full
1982 * FIFO (32 bytes) at specified data rate
1983 */
1984 bits_per_char = info->params.data_bits +
1985 info->params.stop_bits + 1;
1986
1987 /* if port data rate is set to 460800 or less then
1988 * allow tty settings to override, otherwise keep the
1989 * current data rate.
1990 */
1991 if (info->params.data_rate <= 460800)
1992 info->params.data_rate = tty_get_baud_rate(info->tty);
1993
1994 if ( info->params.data_rate ) {
1995 info->timeout = (32*HZ*bits_per_char) /
1996 info->params.data_rate;
1997 }
1998 info->timeout += HZ/50; /* Add .02 seconds of slop */
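	/*
	 * Worked example (illustrative numbers only, not from the hardware
	 * spec): for 9600 bps async with 8 data bits and 1 stop bit,
	 * bits_per_char = 8 + 1 + 1 = 10, so the FIFO time is
	 * (32 * HZ * 10) / 9600 = HZ/30 jiffies (about 33 ms); the HZ/50
	 * (20 ms) of slop just added brings the timeout to roughly 53 ms.
	 */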
1999
2000 if (cflag & CRTSCTS)
2001 info->flags |= ASYNC_CTS_FLOW;
2002 else
2003 info->flags &= ~ASYNC_CTS_FLOW;
2004
2005 if (cflag & CLOCAL)
2006 info->flags &= ~ASYNC_CHECK_CD;
2007 else
2008 info->flags |= ASYNC_CHECK_CD;
2009
2010 /* process tty input control flags */
2011
2012 info->read_status_mask = RXSTATUS_OVERRUN;
2013 if (I_INPCK(info->tty))
2014 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2015 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2016 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2017
2018 if (I_IGNPAR(info->tty))
2019 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2020 if (I_IGNBRK(info->tty)) {
2021 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2022 /* If ignoring parity and break indicators, ignore
2023 * overruns too. (For real raw support).
2024 */
2025 if (I_IGNPAR(info->tty))
2026 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2027 }
2028
2029 mgsl_program_hw(info);
2030
2031 } /* end of mgsl_change_params() */
2032
2033 /* mgsl_put_char()
2034 *
2035 * Add a character to the transmit buffer.
2036 *
2037 * Arguments: tty pointer to tty information structure
2038 * ch character to add to transmit buffer
2039 *
2040 * Return Value: None
2041 */
2042 static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2043 {
2044 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2045 unsigned long flags;
2046
2047 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2048 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2049 __FILE__,__LINE__,ch,info->device_name);
2050 }
2051
2052 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2053 return;
2054
2055 if (!tty || !info->xmit_buf)
2056 return;
2057
2058 spin_lock_irqsave(&info->irq_spinlock,flags);
2059
2060 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2061
2062 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2063 info->xmit_buf[info->xmit_head++] = ch;
2064 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2065 info->xmit_cnt++;
2066 }
2067 }
2068
2069 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2070
2071 } /* end of mgsl_put_char() */
2072
2073 /* mgsl_flush_chars()
2074 *
2075 * Enable transmitter so remaining characters in the
2076 * transmit buffer are sent.
2077 *
2078 * Arguments: tty pointer to tty information structure
2079 * Return Value: None
2080 */
2081 static void mgsl_flush_chars(struct tty_struct *tty)
2082 {
2083 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2084 unsigned long flags;
2085
2086 if ( debug_level >= DEBUG_LEVEL_INFO )
2087 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2088 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2089
2090 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2091 return;
2092
2093 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2094 !info->xmit_buf)
2095 return;
2096
2097 if ( debug_level >= DEBUG_LEVEL_INFO )
2098 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2099 __FILE__,__LINE__,info->device_name );
2100
2101 spin_lock_irqsave(&info->irq_spinlock,flags);
2102
2103 if (!info->tx_active) {
2104 if ( (info->params.mode == MGSL_MODE_HDLC ||
2105 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2106 /* operating in synchronous (frame oriented) mode */
2107 /* copy data from circular xmit_buf to */
2108 /* transmit DMA buffer. */
2109 mgsl_load_tx_dma_buffer(info,
2110 info->xmit_buf,info->xmit_cnt);
2111 }
2112 usc_start_transmitter(info);
2113 }
2114
2115 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2116
2117 } /* end of mgsl_flush_chars() */
2118
2119 /* mgsl_write()
2120 *
2121 * Send a block of data
2122 *
2123 * Arguments:
2124 *
2125 * tty pointer to tty information structure
2126 * buf pointer to buffer containing send data
2127 * count size of send data in bytes
2128 *
2129 * Return Value: number of characters written
2130 */
2131 static int mgsl_write(struct tty_struct * tty,
2132 const unsigned char *buf, int count)
2133 {
2134 int c, ret = 0;
2135 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2136 unsigned long flags;
2137
2138 if ( debug_level >= DEBUG_LEVEL_INFO )
2139 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2140 __FILE__,__LINE__,info->device_name,count);
2141
2142 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2143 goto cleanup;
2144
2145 if (!tty || !info->xmit_buf)
2146 goto cleanup;
2147
2148 if ( info->params.mode == MGSL_MODE_HDLC ||
2149 info->params.mode == MGSL_MODE_RAW ) {
2150 		/* operating in synchronous (frame oriented) mode */
2152 if (info->tx_active) {
2153
2154 if ( info->params.mode == MGSL_MODE_HDLC ) {
2155 ret = 0;
2156 goto cleanup;
2157 }
2158 /* transmitter is actively sending data -
2159 * if we have multiple transmit dma and
2160 * holding buffers, attempt to queue this
2161 * frame for transmission at a later time.
2162 */
2163 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2164 /* no tx holding buffers available */
2165 ret = 0;
2166 goto cleanup;
2167 }
2168
2169 /* queue transmit frame request */
2170 ret = count;
2171 save_tx_buffer_request(info,buf,count);
2172
2173 /* if we have sufficient tx dma buffers,
2174 * load the next buffered tx request
2175 */
2176 spin_lock_irqsave(&info->irq_spinlock,flags);
2177 load_next_tx_holding_buffer(info);
2178 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2179 goto cleanup;
2180 }
2181
2182 /* if operating in HDLC LoopMode and the adapter */
2183 /* has yet to be inserted into the loop, we can't */
2184 /* transmit */
2185
2186 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2187 !usc_loopmode_active(info) )
2188 {
2189 ret = 0;
2190 goto cleanup;
2191 }
2192
2193 if ( info->xmit_cnt ) {
2194 			/* Send data accumulated from mgsl_put_char() calls */
2195 			/* as a frame and wait before accepting more data. */
2196 ret = 0;
2197
2198 /* copy data from circular xmit_buf to */
2199 /* transmit DMA buffer. */
2200 mgsl_load_tx_dma_buffer(info,
2201 info->xmit_buf,info->xmit_cnt);
2202 if ( debug_level >= DEBUG_LEVEL_INFO )
2203 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2204 __FILE__,__LINE__,info->device_name);
2205 } else {
2206 if ( debug_level >= DEBUG_LEVEL_INFO )
2207 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2208 __FILE__,__LINE__,info->device_name);
2209 ret = count;
2210 info->xmit_cnt = count;
2211 mgsl_load_tx_dma_buffer(info,buf,count);
2212 }
2213 } else {
2214 while (1) {
2215 spin_lock_irqsave(&info->irq_spinlock,flags);
2216 c = min_t(int, count,
2217 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2218 SERIAL_XMIT_SIZE - info->xmit_head));
2219 if (c <= 0) {
2220 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2221 break;
2222 }
2223 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2224 info->xmit_head = ((info->xmit_head + c) &
2225 (SERIAL_XMIT_SIZE-1));
2226 info->xmit_cnt += c;
2227 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2228 buf += c;
2229 count -= c;
2230 ret += c;
2231 }
2232 }
2233
2234 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2235 spin_lock_irqsave(&info->irq_spinlock,flags);
2236 if (!info->tx_active)
2237 usc_start_transmitter(info);
2238 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2239 }
2240 cleanup:
2241 if ( debug_level >= DEBUG_LEVEL_INFO )
2242 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2243 __FILE__,__LINE__,info->device_name,ret);
2244
2245 return ret;
2246
2247 } /* end of mgsl_write() */
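
/*
 * Usage sketch (user space, not part of the driver): in HDLC or RAW mode
 * each write() is treated as one complete frame, and 0 is returned while
 * the transmitter is busy and no holding buffer is free. The device node
 * name below is only an assumption for illustration; <fcntl.h>, <string.h>
 * and <unistd.h> are assumed to be included.
 *
 *	unsigned char frame[128];
 *	int fd, n;
 *
 *	fd = open("/dev/ttySL0", O_RDWR);	// assumed device node name
 *	memset(frame, 0, sizeof(frame));	// fill with HDLC frame contents
 *	n = write(fd, frame, sizeof(frame));	// one complete frame per call
 *	if (n == 0)
 *		;				// transmitter busy, retry later
 */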
2248
2249 /* mgsl_write_room()
2250 *
2251 * Return the count of free bytes in transmit buffer
2252 *
2253 * Arguments: tty pointer to tty info structure
2254  * Return Value:	count of free bytes in the transmit buffer
2255 */
2256 static int mgsl_write_room(struct tty_struct *tty)
2257 {
2258 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2259 int ret;
2260
2261 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2262 return 0;
2263 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2264 if (ret < 0)
2265 ret = 0;
2266
2267 if (debug_level >= DEBUG_LEVEL_INFO)
2268 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2269 __FILE__,__LINE__, info->device_name,ret );
2270
2271 if ( info->params.mode == MGSL_MODE_HDLC ||
2272 info->params.mode == MGSL_MODE_RAW ) {
2273 /* operating in synchronous (frame oriented) mode */
2274 if ( info->tx_active )
2275 return 0;
2276 else
2277 return HDLC_MAX_FRAME_SIZE;
2278 }
2279
2280 return ret;
2281
2282 } /* end of mgsl_write_room() */
2283
2284 /* mgsl_chars_in_buffer()
2285 *
2286 * Return the count of bytes in transmit buffer
2287 *
2288 * Arguments: tty pointer to tty info structure
2289  * Return Value:	count of bytes in the transmit buffer
2290 */
2291 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2292 {
2293 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2294
2295 if (debug_level >= DEBUG_LEVEL_INFO)
2296 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2297 __FILE__,__LINE__, info->device_name );
2298
2299 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2300 return 0;
2301
2302 if (debug_level >= DEBUG_LEVEL_INFO)
2303 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2304 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2305
2306 if ( info->params.mode == MGSL_MODE_HDLC ||
2307 info->params.mode == MGSL_MODE_RAW ) {
2308 /* operating in synchronous (frame oriented) mode */
2309 if ( info->tx_active )
2310 return info->max_frame_size;
2311 else
2312 return 0;
2313 }
2314
2315 return info->xmit_cnt;
2316 } /* end of mgsl_chars_in_buffer() */
2317
2318 /* mgsl_flush_buffer()
2319 *
2320 * Discard all data in the send buffer
2321 *
2322 * Arguments: tty pointer to tty info structure
2323 * Return Value: None
2324 */
2325 static void mgsl_flush_buffer(struct tty_struct *tty)
2326 {
2327 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2328 unsigned long flags;
2329
2330 if (debug_level >= DEBUG_LEVEL_INFO)
2331 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2332 __FILE__,__LINE__, info->device_name );
2333
2334 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2335 return;
2336
2337 spin_lock_irqsave(&info->irq_spinlock,flags);
2338 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2339 del_timer(&info->tx_timer);
2340 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2341
2342 wake_up_interruptible(&tty->write_wait);
2343 tty_wakeup(tty);
2344 }
2345
2346 /* mgsl_send_xchar()
2347 *
2348 * Send a high-priority XON/XOFF character
2349 *
2350 * Arguments: tty pointer to tty info structure
2351 * ch character to send
2352 * Return Value: None
2353 */
2354 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2355 {
2356 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2357 unsigned long flags;
2358
2359 if (debug_level >= DEBUG_LEVEL_INFO)
2360 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2361 __FILE__,__LINE__, info->device_name, ch );
2362
2363 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2364 return;
2365
2366 info->x_char = ch;
2367 if (ch) {
2368 /* Make sure transmit interrupts are on */
2369 spin_lock_irqsave(&info->irq_spinlock,flags);
2370 if (!info->tx_enabled)
2371 usc_start_transmitter(info);
2372 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2373 }
2374 } /* end of mgsl_send_xchar() */
2375
2376 /* mgsl_throttle()
2377 *
2378 * Signal remote device to throttle send data (our receive data)
2379 *
2380 * Arguments: tty pointer to tty info structure
2381 * Return Value: None
2382 */
2383 static void mgsl_throttle(struct tty_struct * tty)
2384 {
2385 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2386 unsigned long flags;
2387
2388 if (debug_level >= DEBUG_LEVEL_INFO)
2389 printk("%s(%d):mgsl_throttle(%s) entry\n",
2390 __FILE__,__LINE__, info->device_name );
2391
2392 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2393 return;
2394
2395 if (I_IXOFF(tty))
2396 mgsl_send_xchar(tty, STOP_CHAR(tty));
2397
2398 if (tty->termios->c_cflag & CRTSCTS) {
2399 spin_lock_irqsave(&info->irq_spinlock,flags);
2400 info->serial_signals &= ~SerialSignal_RTS;
2401 usc_set_serial_signals(info);
2402 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2403 }
2404 } /* end of mgsl_throttle() */
2405
2406 /* mgsl_unthrottle()
2407 *
2408 * Signal remote device to stop throttling send data (our receive data)
2409 *
2410 * Arguments: tty pointer to tty info structure
2411 * Return Value: None
2412 */
2413 static void mgsl_unthrottle(struct tty_struct * tty)
2414 {
2415 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2416 unsigned long flags;
2417
2418 if (debug_level >= DEBUG_LEVEL_INFO)
2419 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2420 __FILE__,__LINE__, info->device_name );
2421
2422 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2423 return;
2424
2425 if (I_IXOFF(tty)) {
2426 if (info->x_char)
2427 info->x_char = 0;
2428 else
2429 mgsl_send_xchar(tty, START_CHAR(tty));
2430 }
2431
2432 if (tty->termios->c_cflag & CRTSCTS) {
2433 spin_lock_irqsave(&info->irq_spinlock,flags);
2434 info->serial_signals |= SerialSignal_RTS;
2435 usc_set_serial_signals(info);
2436 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2437 }
2438
2439 } /* end of mgsl_unthrottle() */
2440
2441 /* mgsl_get_stats()
2442 *
2443  * 	get the current statistics (a NULL buffer pointer clears the counters)
2444 *
2445 * Arguments: info pointer to device instance data
2446 * user_icount pointer to buffer to hold returned stats
2447 *
2448 * Return Value: 0 if success, otherwise error code
2449 */
2450 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2451 {
2452 int err;
2453
2454 if (debug_level >= DEBUG_LEVEL_INFO)
2455 		printk("%s(%d):mgsl_get_stats(%s)\n",
2456 __FILE__,__LINE__, info->device_name);
2457
2458 if (!user_icount) {
2459 memset(&info->icount, 0, sizeof(info->icount));
2460 } else {
2461 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2462 if (err)
2463 return -EFAULT;
2464 }
2465
2466 return 0;
2467
2468 } /* end of mgsl_get_stats() */
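
/*
 * Usage sketch (user space): fetch the accumulated counters with
 * MGSL_IOCGSTATS; as the code above shows, passing a NULL pointer clears
 * the counters instead. struct mgsl_icount and the ioctl code come from
 * the synclink user header; fd is an already opened device descriptor.
 *
 *	struct mgsl_icount icount;
 *
 *	if (ioctl(fd, MGSL_IOCGSTATS, &icount) == 0)
 *		printf("txok=%d rxok=%d rxcrc=%d\n",
 *		       icount.txok, icount.rxok, icount.rxcrc);
 */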
2469
2470 /* mgsl_get_params()
2471 *
2472 * get the current serial parameters information
2473 *
2474 * Arguments: info pointer to device instance data
2475 * user_params pointer to buffer to hold returned params
2476 *
2477 * Return Value: 0 if success, otherwise error code
2478 */
2479 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2480 {
2481 int err;
2482 if (debug_level >= DEBUG_LEVEL_INFO)
2483 printk("%s(%d):mgsl_get_params(%s)\n",
2484 __FILE__,__LINE__, info->device_name);
2485
2486 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2487 if (err) {
2488 if ( debug_level >= DEBUG_LEVEL_INFO )
2489 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2490 __FILE__,__LINE__,info->device_name);
2491 return -EFAULT;
2492 }
2493
2494 return 0;
2495
2496 } /* end of mgsl_get_params() */
2497
2498 /* mgsl_set_params()
2499 *
2500 * set the serial parameters
2501 *
2502 * Arguments:
2503 *
2504 * info pointer to device instance data
2505 * new_params user buffer containing new serial params
2506 *
2507 * Return Value: 0 if success, otherwise error code
2508 */
2509 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2510 {
2511 unsigned long flags;
2512 MGSL_PARAMS tmp_params;
2513 int err;
2514
2515 if (debug_level >= DEBUG_LEVEL_INFO)
2516 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2517 info->device_name );
2518 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2519 if (err) {
2520 if ( debug_level >= DEBUG_LEVEL_INFO )
2521 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2522 __FILE__,__LINE__,info->device_name);
2523 return -EFAULT;
2524 }
2525
2526 spin_lock_irqsave(&info->irq_spinlock,flags);
2527 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2528 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2529
2530 mgsl_change_params(info);
2531
2532 return 0;
2533
2534 } /* end of mgsl_set_params() */
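
/*
 * Usage sketch (user space): the intended pattern is read-modify-write of
 * the MGSL_PARAMS structure through MGSL_IOCGPARAMS/MGSL_IOCSPARAMS. The
 * mode and data_rate values below are arbitrary example settings, not
 * recommendations.
 *
 *	MGSL_PARAMS params;
 *
 *	if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
 *		return -1;
 *	params.mode = MGSL_MODE_HDLC;		// frame oriented operation
 *	params.data_rate = 64000;		// example rate only
 *	if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)
 *		return -1;
 */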
2535
2536 /* mgsl_get_txidle()
2537 *
2538 * get the current transmit idle mode
2539 *
2540 * Arguments: info pointer to device instance data
2541 * idle_mode pointer to buffer to hold returned idle mode
2542 *
2543 * Return Value: 0 if success, otherwise error code
2544 */
2545 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2546 {
2547 int err;
2548
2549 if (debug_level >= DEBUG_LEVEL_INFO)
2550 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2551 __FILE__,__LINE__, info->device_name, info->idle_mode);
2552
2553 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2554 if (err) {
2555 if ( debug_level >= DEBUG_LEVEL_INFO )
2556 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2557 __FILE__,__LINE__,info->device_name);
2558 return -EFAULT;
2559 }
2560
2561 return 0;
2562
2563 } /* end of mgsl_get_txidle() */
2564
2565 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2566 *
2567 * Arguments: info pointer to device instance data
2568 * idle_mode new idle mode
2569 *
2570 * Return Value: 0 if success, otherwise error code
2571 */
2572 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2573 {
2574 unsigned long flags;
2575
2576 if (debug_level >= DEBUG_LEVEL_INFO)
2577 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2578 info->device_name, idle_mode );
2579
2580 spin_lock_irqsave(&info->irq_spinlock,flags);
2581 info->idle_mode = idle_mode;
2582 usc_set_txidle( info );
2583 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2584 return 0;
2585
2586 } /* end of mgsl_set_txidle() */
2587
2588 /* mgsl_txenable()
2589 *
2590 * enable or disable the transmitter
2591 *
2592 * Arguments:
2593 *
2594 * info pointer to device instance data
2595 * enable 1 = enable, 0 = disable
2596 *
2597 * Return Value: 0 if success, otherwise error code
2598 */
2599 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2600 {
2601 unsigned long flags;
2602
2603 if (debug_level >= DEBUG_LEVEL_INFO)
2604 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2605 info->device_name, enable);
2606
2607 spin_lock_irqsave(&info->irq_spinlock,flags);
2608 if ( enable ) {
2609 if ( !info->tx_enabled ) {
2610
2611 usc_start_transmitter(info);
2612 /*--------------------------------------------------
2613 * if HDLC/SDLC Loop mode, attempt to insert the
2614 * station in the 'loop' by setting CMR:13. Upon
2615 * receipt of the next GoAhead (RxAbort) sequence,
2616 * the OnLoop indicator (CCSR:7) should go active
2617 * to indicate that we are on the loop
2618 *--------------------------------------------------*/
2619 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2620 usc_loopmode_insert_request( info );
2621 }
2622 } else {
2623 if ( info->tx_enabled )
2624 usc_stop_transmitter(info);
2625 }
2626 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2627 return 0;
2628
2629 } /* end of mgsl_txenable() */
2630
2631 /* mgsl_txabort() abort send HDLC frame
2632 *
2633 * Arguments: info pointer to device instance data
2634 * Return Value: 0 if success, otherwise error code
2635 */
2636 static int mgsl_txabort(struct mgsl_struct * info)
2637 {
2638 unsigned long flags;
2639
2640 if (debug_level >= DEBUG_LEVEL_INFO)
2641 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2642 info->device_name);
2643
2644 spin_lock_irqsave(&info->irq_spinlock,flags);
2645 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2646 {
2647 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2648 usc_loopmode_cancel_transmit( info );
2649 else
2650 usc_TCmd(info,TCmd_SendAbort);
2651 }
2652 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2653 return 0;
2654
2655 } /* end of mgsl_txabort() */
2656
2657 /* mgsl_rxenable() enable or disable the receiver
2658 *
2659 * Arguments: info pointer to device instance data
2660 * enable 1 = enable, 0 = disable
2661 * Return Value: 0 if success, otherwise error code
2662 */
2663 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2664 {
2665 unsigned long flags;
2666
2667 if (debug_level >= DEBUG_LEVEL_INFO)
2668 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2669 info->device_name, enable);
2670
2671 spin_lock_irqsave(&info->irq_spinlock,flags);
2672 if ( enable ) {
2673 if ( !info->rx_enabled )
2674 usc_start_receiver(info);
2675 } else {
2676 if ( info->rx_enabled )
2677 usc_stop_receiver(info);
2678 }
2679 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2680 return 0;
2681
2682 } /* end of mgsl_rxenable() */
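
/*
 * Usage sketch (user space): MGSL_IOCRXENABLE, MGSL_IOCTXENABLE and
 * MGSL_IOCTXABORT take a plain integer argument (1 = enable, 0 = disable;
 * the abort ioctl ignores its argument), not a pointer.
 *
 *	ioctl(fd, MGSL_IOCRXENABLE, 1);		// enable receiver
 *	ioctl(fd, MGSL_IOCTXENABLE, 1);		// enable transmitter
 *	ioctl(fd, MGSL_IOCTXABORT, 0);		// abort HDLC frame in progress
 */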
2683
2684 /* mgsl_wait_event() wait for specified event to occur
2685 *
2686 * Arguments: info pointer to device instance data
2687 * mask pointer to bitmask of events to wait for
2688  * Return Value: 	0 if successful and bit mask updated with
2689  *			the events that occurred,
2690  * 			otherwise error code
2691 */
2692 static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2693 {
2694 unsigned long flags;
2695 int s;
2696 int rc=0;
2697 struct mgsl_icount cprev, cnow;
2698 int events;
2699 int mask;
2700 struct _input_signal_events oldsigs, newsigs;
2701 DECLARE_WAITQUEUE(wait, current);
2702
2703 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2704 if (rc) {
2705 return -EFAULT;
2706 }
2707
2708 if (debug_level >= DEBUG_LEVEL_INFO)
2709 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2710 info->device_name, mask);
2711
2712 spin_lock_irqsave(&info->irq_spinlock,flags);
2713
2714 /* return immediately if state matches requested events */
2715 usc_get_serial_signals(info);
2716 s = info->serial_signals;
2717 events = mask &
2718 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2719 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2720 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2721 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2722 if (events) {
2723 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2724 goto exit;
2725 }
2726
2727 /* save current irq counts */
2728 cprev = info->icount;
2729 oldsigs = info->input_signal_events;
2730
2731 /* enable hunt and idle irqs if needed */
2732 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2733 u16 oldreg = usc_InReg(info,RICR);
2734 u16 newreg = oldreg +
2735 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2736 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2737 if (oldreg != newreg)
2738 usc_OutReg(info, RICR, newreg);
2739 }
2740
2741 set_current_state(TASK_INTERRUPTIBLE);
2742 add_wait_queue(&info->event_wait_q, &wait);
2743
2744 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2745
2746
2747 for(;;) {
2748 schedule();
2749 if (signal_pending(current)) {
2750 rc = -ERESTARTSYS;
2751 break;
2752 }
2753
2754 /* get current irq counts */
2755 spin_lock_irqsave(&info->irq_spinlock,flags);
2756 cnow = info->icount;
2757 newsigs = info->input_signal_events;
2758 set_current_state(TASK_INTERRUPTIBLE);
2759 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2760
2761 /* if no change, wait aborted for some reason */
2762 if (newsigs.dsr_up == oldsigs.dsr_up &&
2763 newsigs.dsr_down == oldsigs.dsr_down &&
2764 newsigs.dcd_up == oldsigs.dcd_up &&
2765 newsigs.dcd_down == oldsigs.dcd_down &&
2766 newsigs.cts_up == oldsigs.cts_up &&
2767 newsigs.cts_down == oldsigs.cts_down &&
2768 newsigs.ri_up == oldsigs.ri_up &&
2769 newsigs.ri_down == oldsigs.ri_down &&
2770 cnow.exithunt == cprev.exithunt &&
2771 cnow.rxidle == cprev.rxidle) {
2772 rc = -EIO;
2773 break;
2774 }
2775
2776 events = mask &
2777 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2778 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2779 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2780 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2781 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2782 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2783 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2784 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2785 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2786 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2787 if (events)
2788 break;
2789
2790 cprev = cnow;
2791 oldsigs = newsigs;
2792 }
2793
2794 remove_wait_queue(&info->event_wait_q, &wait);
2795 set_current_state(TASK_RUNNING);
2796
2797 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2798 spin_lock_irqsave(&info->irq_spinlock,flags);
2799 if (!waitqueue_active(&info->event_wait_q)) {
2800 			/* disable exit hunt mode/idle rcvd IRQs */
2801 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2802 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2803 }
2804 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2805 }
2806 exit:
2807 if ( rc == 0 )
2808 PUT_USER(rc, events, mask_ptr);
2809
2810 return rc;
2811
2812 } /* end of mgsl_wait_event() */
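
/*
 * Usage sketch (user space): MGSL_IOCWAITEVENT takes a pointer to an int
 * holding a bitmask of MgslEvent_* values; on success the mask is
 * rewritten with the events that actually occurred.
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		printf("DCD %s\n",
 *		       (events & MgslEvent_DcdActive) ? "up" : "down");
 */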
2813
2814 static int modem_input_wait(struct mgsl_struct *info,int arg)
2815 {
2816 unsigned long flags;
2817 int rc;
2818 struct mgsl_icount cprev, cnow;
2819 DECLARE_WAITQUEUE(wait, current);
2820
2821 /* save current irq counts */
2822 spin_lock_irqsave(&info->irq_spinlock,flags);
2823 cprev = info->icount;
2824 add_wait_queue(&info->status_event_wait_q, &wait);
2825 set_current_state(TASK_INTERRUPTIBLE);
2826 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2827
2828 for(;;) {
2829 schedule();
2830 if (signal_pending(current)) {
2831 rc = -ERESTARTSYS;
2832 break;
2833 }
2834
2835 /* get new irq counts */
2836 spin_lock_irqsave(&info->irq_spinlock,flags);
2837 cnow = info->icount;
2838 set_current_state(TASK_INTERRUPTIBLE);
2839 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2840
2841 /* if no change, wait aborted for some reason */
2842 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2843 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2844 rc = -EIO;
2845 break;
2846 }
2847
2848 /* check for change in caller specified modem input */
2849 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2850 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2851 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2852 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2853 rc = 0;
2854 break;
2855 }
2856
2857 cprev = cnow;
2858 }
2859 remove_wait_queue(&info->status_event_wait_q, &wait);
2860 set_current_state(TASK_RUNNING);
2861 return rc;
2862 }
2863
2864 /* return the state of the serial control and status signals
2865 */
2866 static int tiocmget(struct tty_struct *tty, struct file *file)
2867 {
2868 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2869 unsigned int result;
2870 unsigned long flags;
2871
2872 spin_lock_irqsave(&info->irq_spinlock,flags);
2873 usc_get_serial_signals(info);
2874 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2875
2876 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2877 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2878 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2879 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2880 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2881 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2882
2883 if (debug_level >= DEBUG_LEVEL_INFO)
2884 printk("%s(%d):%s tiocmget() value=%08X\n",
2885 __FILE__,__LINE__, info->device_name, result );
2886 return result;
2887 }
2888
2889 /* set modem control signals (DTR/RTS)
2890 */
2891 static int tiocmset(struct tty_struct *tty, struct file *file,
2892 unsigned int set, unsigned int clear)
2893 {
2894 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2895 unsigned long flags;
2896
2897 if (debug_level >= DEBUG_LEVEL_INFO)
2898 printk("%s(%d):%s tiocmset(%x,%x)\n",
2899 __FILE__,__LINE__,info->device_name, set, clear);
2900
2901 if (set & TIOCM_RTS)
2902 info->serial_signals |= SerialSignal_RTS;
2903 if (set & TIOCM_DTR)
2904 info->serial_signals |= SerialSignal_DTR;
2905 if (clear & TIOCM_RTS)
2906 info->serial_signals &= ~SerialSignal_RTS;
2907 if (clear & TIOCM_DTR)
2908 info->serial_signals &= ~SerialSignal_DTR;
2909
2910 spin_lock_irqsave(&info->irq_spinlock,flags);
2911 usc_set_serial_signals(info);
2912 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2913
2914 return 0;
2915 }
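
/*
 * Usage sketch (user space): the standard termios modem ioctls are routed
 * to tiocmget()/tiocmset() above by the tty core.
 *
 *	int bits;
 *
 *	ioctl(fd, TIOCMGET, &bits);		// read RTS/DTR/CTS/DSR/DCD/RI
 *	bits |= TIOCM_RTS | TIOCM_DTR;
 *	ioctl(fd, TIOCMSET, &bits);		// assert RTS and DTR
 */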
2916
2917 /* mgsl_break() Set or clear transmit break condition
2918 *
2919 * Arguments: tty pointer to tty instance data
2920 * break_state -1=set break condition, 0=clear
2921 * Return Value: None
2922 */
2923 static void mgsl_break(struct tty_struct *tty, int break_state)
2924 {
2925 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2926 unsigned long flags;
2927
2928 if (debug_level >= DEBUG_LEVEL_INFO)
2929 printk("%s(%d):mgsl_break(%s,%d)\n",
2930 __FILE__,__LINE__, info->device_name, break_state);
2931
2932 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2933 return;
2934
2935 spin_lock_irqsave(&info->irq_spinlock,flags);
2936 if (break_state == -1)
2937 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2938 else
2939 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2940 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2941
2942 } /* end of mgsl_break() */
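
/*
 * Usage sketch (user space): the tty core invokes mgsl_break() for the
 * standard break requests, for example via tcsendbreak() from <termios.h>.
 *
 *	tcsendbreak(fd, 0);		// transmit a break for a short interval
 */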
2943
2944 /* mgsl_ioctl() Service an IOCTL request
2945 *
2946 * Arguments:
2947 *
2948 * tty pointer to tty instance data
2949 * file pointer to associated file object for device
2950 * cmd IOCTL command code
2951 * arg command argument/context
2952 *
2953 * Return Value: 0 if success, otherwise error code
2954 */
2955 static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2956 unsigned int cmd, unsigned long arg)
2957 {
2958 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2959
2960 if (debug_level >= DEBUG_LEVEL_INFO)
2961 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2962 info->device_name, cmd );
2963
2964 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2965 return -ENODEV;
2966
2967 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2968 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2969 if (tty->flags & (1 << TTY_IO_ERROR))
2970 return -EIO;
2971 }
2972
2973 return mgsl_ioctl_common(info, cmd, arg);
2974 }
2975
2976 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2977 {
2978 int error;
2979 struct mgsl_icount cnow; /* kernel counter temps */
2980 void __user *argp = (void __user *)arg;
2981 struct serial_icounter_struct __user *p_cuser; /* user space */
2982 unsigned long flags;
2983
2984 switch (cmd) {
2985 case MGSL_IOCGPARAMS:
2986 return mgsl_get_params(info, argp);
2987 case MGSL_IOCSPARAMS:
2988 return mgsl_set_params(info, argp);
2989 case MGSL_IOCGTXIDLE:
2990 return mgsl_get_txidle(info, argp);
2991 case MGSL_IOCSTXIDLE:
2992 return mgsl_set_txidle(info,(int)arg);
2993 case MGSL_IOCTXENABLE:
2994 return mgsl_txenable(info,(int)arg);
2995 case MGSL_IOCRXENABLE:
2996 return mgsl_rxenable(info,(int)arg);
2997 case MGSL_IOCTXABORT:
2998 return mgsl_txabort(info);
2999 case MGSL_IOCGSTATS:
3000 return mgsl_get_stats(info, argp);
3001 case MGSL_IOCWAITEVENT:
3002 return mgsl_wait_event(info, argp);
3003 case MGSL_IOCLOOPTXDONE:
3004 return mgsl_loopmode_send_done(info);
3005 /* Wait for modem input (DCD,RI,DSR,CTS) change
3006 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3007 */
3008 case TIOCMIWAIT:
3009 return modem_input_wait(info,(int)arg);
3010
3011 /*
3012 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3013 * Return: write counters to the user passed counter struct
3014 * NB: both 1->0 and 0->1 transitions are counted except for
3015 * RI where only 0->1 is counted.
3016 */
3017 case TIOCGICOUNT:
3018 spin_lock_irqsave(&info->irq_spinlock,flags);
3019 cnow = info->icount;
3020 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3021 p_cuser = argp;
3022 PUT_USER(error,cnow.cts, &p_cuser->cts);
3023 if (error) return error;
3024 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3025 if (error) return error;
3026 PUT_USER(error,cnow.rng, &p_cuser->rng);
3027 if (error) return error;
3028 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3029 if (error) return error;
3030 PUT_USER(error,cnow.rx, &p_cuser->rx);
3031 if (error) return error;
3032 PUT_USER(error,cnow.tx, &p_cuser->tx);
3033 if (error) return error;
3034 PUT_USER(error,cnow.frame, &p_cuser->frame);
3035 if (error) return error;
3036 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3037 if (error) return error;
3038 PUT_USER(error,cnow.parity, &p_cuser->parity);
3039 if (error) return error;
3040 PUT_USER(error,cnow.brk, &p_cuser->brk);
3041 if (error) return error;
3042 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3043 if (error) return error;
3044 return 0;
3045 default:
3046 return -ENOIOCTLCMD;
3047 }
3048 return 0;
3049 }
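
/*
 * Usage sketch (user space): block until DCD changes with TIOCMIWAIT,
 * then read the accumulated interrupt counters with TIOCGICOUNT
 * (struct serial_icounter_struct from <linux/serial.h>).
 *
 *	struct serial_icounter_struct ic;
 *
 *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&
 *	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("dcd transitions=%d\n", ic.dcd);
 */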
3050
3051 /* mgsl_set_termios()
3052 *
3053 * Set new termios settings
3054 *
3055 * Arguments:
3056 *
3057 * tty pointer to tty structure
3058  * 	old_termios	pointer to the previous termios settings
3059 *
3060 * Return Value: None
3061 */
3062 static void mgsl_set_termios(struct tty_struct *tty, struct termios *old_termios)
3063 {
3064 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3065 unsigned long flags;
3066
3067 if (debug_level >= DEBUG_LEVEL_INFO)
3068 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3069 tty->driver->name );
3070
3071 /* just return if nothing has changed */
3072 if ((tty->termios->c_cflag == old_termios->c_cflag)
3073 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3074 == RELEVANT_IFLAG(old_termios->c_iflag)))
3075 return;
3076
3077 mgsl_change_params(info);
3078
3079 /* Handle transition to B0 status */
3080 if (old_termios->c_cflag & CBAUD &&
3081 !(tty->termios->c_cflag & CBAUD)) {
3082 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3083 spin_lock_irqsave(&info->irq_spinlock,flags);
3084 usc_set_serial_signals(info);
3085 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3086 }
3087
3088 /* Handle transition away from B0 status */
3089 if (!(old_termios->c_cflag & CBAUD) &&
3090 tty->termios->c_cflag & CBAUD) {
3091 info->serial_signals |= SerialSignal_DTR;
3092 if (!(tty->termios->c_cflag & CRTSCTS) ||
3093 !test_bit(TTY_THROTTLED, &tty->flags)) {
3094 info->serial_signals |= SerialSignal_RTS;
3095 }
3096 spin_lock_irqsave(&info->irq_spinlock,flags);
3097 usc_set_serial_signals(info);
3098 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3099 }
3100
3101 /* Handle turning off CRTSCTS */
3102 if (old_termios->c_cflag & CRTSCTS &&
3103 !(tty->termios->c_cflag & CRTSCTS)) {
3104 tty->hw_stopped = 0;
3105 mgsl_start(tty);
3106 }
3107
3108 } /* end of mgsl_set_termios() */
3109
3110 /* mgsl_close()
3111 *
3112 * Called when port is closed. Wait for remaining data to be
3113 * sent. Disable port and free resources.
3114 *
3115 * Arguments:
3116 *
3117 * tty pointer to open tty structure
3118 * filp pointer to open file object
3119 *
3120 * Return Value: None
3121 */
3122 static void mgsl_close(struct tty_struct *tty, struct file * filp)
3123 {
3124 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3125
3126 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3127 return;
3128
3129 if (debug_level >= DEBUG_LEVEL_INFO)
3130 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3131 __FILE__,__LINE__, info->device_name, info->count);
3132
3133 if (!info->count)
3134 return;
3135
3136 if (tty_hung_up_p(filp))
3137 goto cleanup;
3138
3139 if ((tty->count == 1) && (info->count != 1)) {
3140 /*
3141 * tty->count is 1 and the tty structure will be freed.
3142 * info->count should be one in this case.
3143 * if it's not, correct it so that the port is shutdown.
3144 */
3145 printk("mgsl_close: bad refcount; tty->count is 1, "
3146 "info->count is %d\n", info->count);
3147 info->count = 1;
3148 }
3149
3150 info->count--;
3151
3152 /* if at least one open remaining, leave hardware active */
3153 if (info->count)
3154 goto cleanup;
3155
3156 info->flags |= ASYNC_CLOSING;
3157
3158 /* set tty->closing to notify line discipline to
3159 * only process XON/XOFF characters. Only the N_TTY
3160 * discipline appears to use this (ppp does not).
3161 */
3162 tty->closing = 1;
3163
3164 /* wait for transmit data to clear all layers */
3165
3166 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3167 if (debug_level >= DEBUG_LEVEL_INFO)
3168 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3169 __FILE__,__LINE__, info->device_name );
3170 tty_wait_until_sent(tty, info->closing_wait);
3171 }
3172
3173 if (info->flags & ASYNC_INITIALIZED)
3174 mgsl_wait_until_sent(tty, info->timeout);
3175
3176 if (tty->driver->flush_buffer)
3177 tty->driver->flush_buffer(tty);
3178
3179 tty_ldisc_flush(tty);
3180
3181 shutdown(info);
3182
3183 tty->closing = 0;
3184 info->tty = NULL;
3185
3186 if (info->blocked_open) {
3187 if (info->close_delay) {
3188 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3189 }
3190 wake_up_interruptible(&info->open_wait);
3191 }
3192
3193 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3194
3195 wake_up_interruptible(&info->close_wait);
3196
3197 cleanup:
3198 if (debug_level >= DEBUG_LEVEL_INFO)
3199 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3200 tty->driver->name, info->count);
3201
3202 } /* end of mgsl_close() */
3203
3204 /* mgsl_wait_until_sent()
3205 *
3206 * Wait until the transmitter is empty.
3207 *
3208 * Arguments:
3209 *
3210 * tty pointer to tty info structure
3211 * timeout time to wait for send completion
3212 *
3213 * Return Value: None
3214 */
3215 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3216 {
3217 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3218 unsigned long orig_jiffies, char_time;
3219
3220 if (!info )
3221 return;
3222
3223 if (debug_level >= DEBUG_LEVEL_INFO)
3224 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3225 __FILE__,__LINE__, info->device_name );
3226
3227 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3228 return;
3229
3230 if (!(info->flags & ASYNC_INITIALIZED))
3231 goto exit;
3232
3233 orig_jiffies = jiffies;
3234
3235 /* Set check interval to 1/5 of estimated time to
3236 * send a character, and make it at least 1. The check
3237 * interval should also be less than the timeout.
3238 * Note: use tight timings here to satisfy the NIST-PCTS.
3239 */
3240
3241 if ( info->params.data_rate ) {
3242 char_time = info->timeout/(32 * 5);
3243 if (!char_time)
3244 char_time++;
3245 } else
3246 char_time = 1;
3247
3248 if (timeout)
3249 char_time = min_t(unsigned long, char_time, timeout);
3250
3251 if ( info->params.mode == MGSL_MODE_HDLC ||
3252 info->params.mode == MGSL_MODE_RAW ) {
3253 while (info->tx_active) {
3254 msleep_interruptible(jiffies_to_msecs(char_time));
3255 if (signal_pending(current))
3256 break;
3257 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3258 break;
3259 }
3260 } else {
3261 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3262 info->tx_enabled) {
3263 msleep_interruptible(jiffies_to_msecs(char_time));
3264 if (signal_pending(current))
3265 break;
3266 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3267 break;
3268 }
3269 }
3270
3271 exit:
3272 if (debug_level >= DEBUG_LEVEL_INFO)
3273 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3274 __FILE__,__LINE__, info->device_name );
3275
3276 } /* end of mgsl_wait_until_sent() */
3277
3278 /* mgsl_hangup()
3279 *
3280 * Called by tty_hangup() when a hangup is signaled.
3281  * 	This is the same as closing all open files for the port.
3282 *
3283 * Arguments: tty pointer to associated tty object
3284 * Return Value: None
3285 */
3286 static void mgsl_hangup(struct tty_struct *tty)
3287 {
3288 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3289
3290 if (debug_level >= DEBUG_LEVEL_INFO)
3291 printk("%s(%d):mgsl_hangup(%s)\n",
3292 __FILE__,__LINE__, info->device_name );
3293
3294 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3295 return;
3296
3297 mgsl_flush_buffer(tty);
3298 shutdown(info);
3299
3300 info->count = 0;
3301 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3302 info->tty = NULL;
3303
3304 wake_up_interruptible(&info->open_wait);
3305
3306 } /* end of mgsl_hangup() */
3307
3308 /* block_til_ready()
3309 *
3310 * Block the current process until the specified port
3311 * is ready to be opened.
3312 *
3313 * Arguments:
3314 *
3315 * tty pointer to tty info structure
3316 * filp pointer to open file object
3317 * info pointer to device instance data
3318 *
3319 * Return Value: 0 if success, otherwise error code
3320 */
3321 static int block_til_ready(struct tty_struct *tty, struct file * filp,
3322 struct mgsl_struct *info)
3323 {
3324 DECLARE_WAITQUEUE(wait, current);
3325 int retval;
3326 int do_clocal = 0, extra_count = 0;
3327 unsigned long flags;
3328
3329 if (debug_level >= DEBUG_LEVEL_INFO)
3330 printk("%s(%d):block_til_ready on %s\n",
3331 __FILE__,__LINE__, tty->driver->name );
3332
3333 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3334 /* nonblock mode is set or port is not enabled */
3335 info->flags |= ASYNC_NORMAL_ACTIVE;
3336 return 0;
3337 }
3338
3339 if (tty->termios->c_cflag & CLOCAL)
3340 do_clocal = 1;
3341
3342 /* Wait for carrier detect and the line to become
3343 * free (i.e., not in use by the callout). While we are in
3344 * this loop, info->count is dropped by one, so that
3345 * mgsl_close() knows when to free things. We restore it upon
3346 * exit, either normal or abnormal.
3347 */
3348
3349 retval = 0;
3350 add_wait_queue(&info->open_wait, &wait);
3351
3352 if (debug_level >= DEBUG_LEVEL_INFO)
3353 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3354 __FILE__,__LINE__, tty->driver->name, info->count );
3355
3356 spin_lock_irqsave(&info->irq_spinlock, flags);
3357 if (!tty_hung_up_p(filp)) {
3358 extra_count = 1;
3359 info->count--;
3360 }
3361 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3362 info->blocked_open++;
3363
3364 while (1) {
3365 if (tty->termios->c_cflag & CBAUD) {
3366 spin_lock_irqsave(&info->irq_spinlock,flags);
3367 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3368 usc_set_serial_signals(info);
3369 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3370 }
3371
3372 set_current_state(TASK_INTERRUPTIBLE);
3373
3374 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3375 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3376 -EAGAIN : -ERESTARTSYS;
3377 break;
3378 }
3379
3380 spin_lock_irqsave(&info->irq_spinlock,flags);
3381 usc_get_serial_signals(info);
3382 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3383
3384 if (!(info->flags & ASYNC_CLOSING) &&
3385 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3386 break;
3387 }
3388
3389 if (signal_pending(current)) {
3390 retval = -ERESTARTSYS;
3391 break;
3392 }
3393
3394 if (debug_level >= DEBUG_LEVEL_INFO)
3395 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3396 __FILE__,__LINE__, tty->driver->name, info->count );
3397
3398 schedule();
3399 }
3400
3401 set_current_state(TASK_RUNNING);
3402 remove_wait_queue(&info->open_wait, &wait);
3403
3404 if (extra_count)
3405 info->count++;
3406 info->blocked_open--;
3407
3408 if (debug_level >= DEBUG_LEVEL_INFO)
3409 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3410 __FILE__,__LINE__, tty->driver->name, info->count );
3411
3412 if (!retval)
3413 info->flags |= ASYNC_NORMAL_ACTIVE;
3414
3415 return retval;
3416
3417 } /* end of block_til_ready() */
3418
3419 /* mgsl_open()
3420 *
3421 * Called when a port is opened. Init and enable port.
3422 * Perform serial-specific initialization for the tty structure.
3423 *
3424 * Arguments: tty pointer to tty info structure
3425 * filp associated file pointer
3426 *
3427 * Return Value: 0 if success, otherwise error code
3428 */
3429 static int mgsl_open(struct tty_struct *tty, struct file * filp)
3430 {
3431 struct mgsl_struct *info;
3432 int retval, line;
3433 unsigned long flags;
3434
3435 /* verify range of specified line number */
3436 line = tty->index;
3437 if ((line < 0) || (line >= mgsl_device_count)) {
3438 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3439 __FILE__,__LINE__,line);
3440 return -ENODEV;
3441 }
3442
3443 /* find the info structure for the specified line */
3444 info = mgsl_device_list;
3445 while(info && info->line != line)
3446 info = info->next_device;
3447 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3448 return -ENODEV;
3449
3450 tty->driver_data = info;
3451 info->tty = tty;
3452
3453 if (debug_level >= DEBUG_LEVEL_INFO)
3454 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3455 __FILE__,__LINE__,tty->driver->name, info->count);
3456
3457 /* If port is closing, signal caller to try again */
3458 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3459 if (info->flags & ASYNC_CLOSING)
3460 interruptible_sleep_on(&info->close_wait);
3461 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3462 -EAGAIN : -ERESTARTSYS);
3463 goto cleanup;
3464 }
3465
3466 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3467
3468 spin_lock_irqsave(&info->netlock, flags);
3469 if (info->netcount) {
3470 retval = -EBUSY;
3471 spin_unlock_irqrestore(&info->netlock, flags);
3472 goto cleanup;
3473 }
3474 info->count++;
3475 spin_unlock_irqrestore(&info->netlock, flags);
3476
3477 if (info->count == 1) {
3478 /* 1st open on this device, init hardware */
3479 retval = startup(info);
3480 if (retval < 0)
3481 goto cleanup;
3482 }
3483
3484 retval = block_til_ready(tty, filp, info);
3485 if (retval) {
3486 if (debug_level >= DEBUG_LEVEL_INFO)
3487 printk("%s(%d):block_til_ready(%s) returned %d\n",
3488 __FILE__,__LINE__, info->device_name, retval);
3489 goto cleanup;
3490 }
3491
3492 if (debug_level >= DEBUG_LEVEL_INFO)
3493 printk("%s(%d):mgsl_open(%s) success\n",
3494 __FILE__,__LINE__, info->device_name);
3495 retval = 0;
3496
3497 cleanup:
3498 if (retval) {
3499 if (tty->count == 1)
3500 info->tty = NULL; /* tty layer will release tty struct */
3501 if(info->count)
3502 info->count--;
3503 }
3504
3505 return retval;
3506
3507 } /* end of mgsl_open() */
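
/*
 * Usage sketch (user space): opening with O_NONBLOCK skips the carrier
 * wait in block_til_ready() above; a plain blocking open waits for DCD
 * unless CLOCAL is set. The device node name is an assumption for
 * illustration only.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);
 *	if (fd < 0)
 *		perror("open");
 */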
3508
3509 /*
3510 * /proc fs routines....
3511 */
3512
3513 static inline int line_info(char *buf, struct mgsl_struct *info)
3514 {
3515 char stat_buf[30];
3516 int ret;
3517 unsigned long flags;
3518
3519 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3520 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3521 info->device_name, info->io_base, info->irq_level,
3522 info->phys_memory_base, info->phys_lcr_base);
3523 } else {
3524 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3525 info->device_name, info->io_base,
3526 info->irq_level, info->dma_level);
3527 }
3528
3529 /* output current serial signal states */
3530 spin_lock_irqsave(&info->irq_spinlock,flags);
3531 usc_get_serial_signals(info);
3532 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3533
3534 stat_buf[0] = 0;
3535 stat_buf[1] = 0;
3536 if (info->serial_signals & SerialSignal_RTS)
3537 strcat(stat_buf, "|RTS");
3538 if (info->serial_signals & SerialSignal_CTS)
3539 strcat(stat_buf, "|CTS");
3540 if (info->serial_signals & SerialSignal_DTR)
3541 strcat(stat_buf, "|DTR");
3542 if (info->serial_signals & SerialSignal_DSR)
3543 strcat(stat_buf, "|DSR");
3544 if (info->serial_signals & SerialSignal_DCD)
3545 strcat(stat_buf, "|CD");
3546 if (info->serial_signals & SerialSignal_RI)
3547 strcat(stat_buf, "|RI");
3548
3549 if (info->params.mode == MGSL_MODE_HDLC ||
3550 info->params.mode == MGSL_MODE_RAW ) {
3551 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3552 info->icount.txok, info->icount.rxok);
3553 if (info->icount.txunder)
3554 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3555 if (info->icount.txabort)
3556 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3557 if (info->icount.rxshort)
3558 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3559 if (info->icount.rxlong)
3560 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3561 if (info->icount.rxover)
3562 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3563 if (info->icount.rxcrc)
3564 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3565 } else {
3566 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3567 info->icount.tx, info->icount.rx);
3568 if (info->icount.frame)
3569 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3570 if (info->icount.parity)
3571 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3572 if (info->icount.brk)
3573 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3574 if (info->icount.overrun)
3575 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3576 }
3577
3578 /* Append serial signal status to end */
3579 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3580
3581 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3582 info->tx_active,info->bh_requested,info->bh_running,
3583 info->pending_bh);
3584
3585 spin_lock_irqsave(&info->irq_spinlock,flags);
3586 {
3587 u16 Tcsr = usc_InReg( info, TCSR );
3588 u16 Tdmr = usc_InDmaReg( info, TDMR );
3589 u16 Ticr = usc_InReg( info, TICR );
3590 u16 Rscr = usc_InReg( info, RCSR );
3591 u16 Rdmr = usc_InDmaReg( info, RDMR );
3592 u16 Ricr = usc_InReg( info, RICR );
3593 u16 Icr = usc_InReg( info, ICR );
3594 u16 Dccr = usc_InReg( info, DCCR );
3595 u16 Tmr = usc_InReg( info, TMR );
3596 u16 Tccr = usc_InReg( info, TCCR );
3597 u16 Ccar = inw( info->io_base + CCAR );
3598 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3599 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3600 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3601 }
3602 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3603
3604 return ret;
3605
3606 } /* end of line_info() */
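/* Illustrative sample of the text line_info() produces for a PCI device
 * running in HDLC mode (all values below are made up; only the layout is
 * taken from the sprintf formats above):
 *
 *	ttySL0:PCI io:E800 irq:10 mem:C8000000 lcr:C9000000 HDLC txok:0 rxok:0 RTS|DTR
 *	txactive=0 bh_req=0 bh_run=0 pending_bh=0
 *	tcsr=... tdmr=... (register dump lines follow)
 */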
3607
3608 /* mgsl_read_proc()
3609 *
3610 * Called to print information about devices
3611 *
3612 * Arguments:
3613 * page page of memory to hold returned info
3614  * 	start	where to store pointer to start of returned data in page
3615  * 	off	offset into the formatted device information
3616  * 	count	maximum number of bytes to return
3617  * 	eof	set to 1 when all device information has been returned
3618  * 	data	private data pointer (unused)
3619  *
3620  * Return Value: number of bytes available at *start, 0 if past end of data
3621 */
3622 static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3623 int *eof, void *data)
3624 {
3625 int len = 0, l;
3626 off_t begin = 0;
3627 struct mgsl_struct *info;
3628
3629 len += sprintf(page, "synclink driver:%s\n", driver_version);
3630
3631 info = mgsl_device_list;
3632 while( info ) {
3633 l = line_info(page + len, info);
3634 len += l;
3635 if (len+begin > off+count)
3636 goto done;
3637 if (len+begin < off) {
3638 begin += len;
3639 len = 0;
3640 }
3641 info = info->next_device;
3642 }
3643
3644 *eof = 1;
3645 done:
3646 if (off >= len+begin)
3647 return 0;
3648 *start = page + (off-begin);
3649 return ((count < begin+len-off) ? count : begin+len-off);
3650
3651 } /* end of mgsl_read_proc() */
3652
3653 /* mgsl_allocate_dma_buffers()
3654 *
3655 * Allocate and format DMA buffers (ISA adapter)
3656 * or format shared memory buffers (PCI adapter).
3657 *
3658 * Arguments: info pointer to device instance data
3659 * Return Value: 0 if success, otherwise error
3660 */
3661 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3662 {
3663 unsigned short BuffersPerFrame;
3664
3665 info->last_mem_alloc = 0;
3666
3667 /* Calculate the number of DMA buffers necessary to hold the */
3668 /* largest allowable frame size. Note: If the max frame size is */
3669 /* not an even multiple of the DMA buffer size then we need to */
3670 /* round the buffer count per frame up one. */
3671
3672 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3673 if ( info->max_frame_size % DMABUFFERSIZE )
3674 BuffersPerFrame++;
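	/* Worked example (illustrative; assumes DMABUFFERSIZE is 4096, its
	 * definition is not shown here): max_frame_size = 4096 divides evenly,
	 * giving BuffersPerFrame = 1; max_frame_size = 6000 leaves a remainder,
	 * so BuffersPerFrame is rounded up to 2.
	 */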
3675
3676 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3677 /*
3678 * The PCI adapter has 256KBytes of shared memory to use.
3679 * This is 64 PAGE_SIZE buffers.
3680 *
3681 * The first page is used for padding at this time so the
3682 * buffer list does not begin at offset 0 of the PCI
3683 * adapter's shared memory.
3684 *
3685 * The 2nd page is used for the buffer list. A 4K buffer
3686 * list can hold 128 DMA_BUFFER structures at 32 bytes
3687 * each.
3688 *
3689 * This leaves 62 4K pages.
3690 *
3691 * The next N pages are used for transmit frame(s). We
3692 * reserve enough 4K page blocks to hold the required
3693 * number of transmit dma buffers (num_tx_dma_buffers),
3694 * each of MaxFrameSize size.
3695 *
3696 * Of the remaining pages (62-N), determine how many can
3697 * be used to receive full MaxFrameSize inbound frames
3698 */
3699 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3700 info->rx_buffer_count = 62 - info->tx_buffer_count;
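		/* Example (illustrative): with the default num_tx_dma_buffers of 1
		 * and BuffersPerFrame = 1, the transmit side claims one of the 62
		 * available 4K pages and the remaining 61 become receive buffers.
		 */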
3701 } else {
3702 /* Calculate the number of PAGE_SIZE buffers needed for */
3703 /* receive and transmit DMA buffers. */
3704
3705
3706 /* Calculate the number of DMA buffers necessary to */
3707 /* hold 7 max size receive frames and one max size transmit frame. */
3708 /* The receive buffer count is bumped by one so we avoid an */
3709 /* End of List condition if all receive buffers are used when */
3710 /* using linked list DMA buffers. */
3711
3712 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3713 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
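		/* Example (illustrative; MAXRXFRAMES is assumed to be 7, matching
		 * the "7 max size receive frames" note above): BuffersPerFrame = 1
		 * gives rx_buffer_count = (1 * 7) + 6 = 13 and, with the default
		 * num_tx_dma_buffers of 1, tx_buffer_count = 1, well under the
		 * 62 buffer cap applied below.
		 */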
3714
3715 /*
3716 * limit total TxBuffers & RxBuffers to 62 4K total
3717 * (ala PCI Allocation)
3718 */
3719
3720 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3721 info->rx_buffer_count = 62 - info->tx_buffer_count;
3722
3723 }
3724
3725 if ( debug_level >= DEBUG_LEVEL_INFO )
3726 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3727 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3728
3729 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3730 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3731 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3732 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3733 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3734 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3735 return -ENOMEM;
3736 }
3737
3738 mgsl_reset_rx_dma_buffers( info );
3739 mgsl_reset_tx_dma_buffers( info );
3740
3741 return 0;
3742
3743 } /* end of mgsl_allocate_dma_buffers() */
3744
3745 /*
3746 * mgsl_alloc_buffer_list_memory()
3747 *
3748 * Allocate a common DMA buffer for use as the
3749 * receive and transmit buffer lists.
3750 *
3751 * A buffer list is a set of buffer entries where each entry contains
3752 * a pointer to an actual buffer and a pointer to the next buffer entry
3753 * (plus some other info about the buffer).
3754 *
3755 * The buffer entries for a list are built to form a circular list so
3756 * that when the entire list has been traversed you start back at the
3757 * beginning.
3758 *
3759 * This function allocates memory for just the buffer entries.
3760 * The links (pointer to next entry) are filled in with the physical
3761 * address of the next entry so the adapter can navigate the list
3762 * using bus master DMA. The pointers to the actual buffers are filled
3763 * out later when the actual buffers are allocated.
3764 *
3765 * Arguments: info pointer to device instance data
3766 * Return Value: 0 if success, otherwise error
3767 */
3768 static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3769 {
3770 unsigned int i;
3771
3772 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3773 /* PCI adapter uses shared memory. */
3774 info->buffer_list = info->memory_base + info->last_mem_alloc;
3775 info->buffer_list_phys = info->last_mem_alloc;
3776 info->last_mem_alloc += BUFFERLISTSIZE;
3777 } else {
3778 /* ISA adapter uses system memory. */
3779 /* The buffer lists are allocated as a common buffer that both */
3780 /* the processor and adapter can access. This allows the driver to */
3781 /* inspect portions of the buffer while other portions are being */
3782 /* updated by the adapter using Bus Master DMA. */
3783
3784 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3785 if (info->buffer_list == NULL)
3786 return -ENOMEM;
3787 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3788 }
3789
3790 /* We got the memory for the buffer entry lists. */
3791 /* Initialize the memory block to all zeros. */
3792 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3793
3794 /* Save virtual address pointers to the receive and */
3795 /* transmit buffer lists. (Receive 1st). These pointers will */
3796 /* be used by the processor to access the lists. */
3797 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3798 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3799 info->tx_buffer_list += info->rx_buffer_count;
3800
3801 /*
3802 * Build the links for the buffer entry lists such that
3803 * two circular lists are built. (Transmit and Receive).
3804 *
3805 * Note: the links are physical addresses
3806 * which are read by the adapter to determine the next
3807 * buffer entry to use.
3808 */
3809
3810 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3811 /* calculate and store physical address of this buffer entry */
3812 info->rx_buffer_list[i].phys_entry =
3813 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3814
3815 /* calculate and store physical address of */
3816 		/* next entry in circular list of entries */
3817
3818 info->rx_buffer_list[i].link = info->buffer_list_phys;
3819
3820 if ( i < info->rx_buffer_count - 1 )
3821 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3822 }
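	/* Worked example of the link values (illustrative; a DMABUFFERENTRY is
	 * taken as 32 bytes per the "32 bytes each" note in
	 * mgsl_allocate_dma_buffers): with buffer_list_phys = 0x1000 and
	 * rx_buffer_count = 3, entry 0 links to 0x1020, entry 1 to 0x1040, and
	 * entry 2 links back to 0x1000, closing the circular receive list.
	 */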
3823
3824 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3825 /* calculate and store physical address of this buffer entry */
3826 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3827 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3828
3829 /* calculate and store physical address of */
3830 		/* next entry in circular list of entries */
3831
3832 info->tx_buffer_list[i].link = info->buffer_list_phys +
3833 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3834
3835 if ( i < info->tx_buffer_count - 1 )
3836 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3837 }
3838
3839 return 0;
3840
3841 } /* end of mgsl_alloc_buffer_list_memory() */
3842
3843 /* Free DMA buffers allocated for use as the
3844 * receive and transmit buffer lists.
3845 * Warning:
3846 *
3847 * The data transfer buffers associated with the buffer list
3848 * MUST be freed before freeing the buffer list itself because
3849 * the buffer list contains the information necessary to free
3850 * the individual buffers!
3851 */
3852 static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3853 {
3854 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3855 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3856
3857 info->buffer_list = NULL;
3858 info->rx_buffer_list = NULL;
3859 info->tx_buffer_list = NULL;
3860
3861 } /* end of mgsl_free_buffer_list_memory() */
3862
3863 /*
3864 * mgsl_alloc_frame_memory()
3865 *
3866 * Allocate the frame DMA buffers used by the specified buffer list.
3867 * Each DMA buffer will be one memory page in size. This is necessary
3868  * because memory can fragment enough that it may be impossible
3869  * to allocate contiguous pages.
3870 *
3871 * Arguments:
3872 *
3873 * info pointer to device instance data
3874 * BufferList pointer to list of buffer entries
3875 * Buffercount count of buffer entries in buffer list
3876 *
3877 * Return Value: 0 if success, otherwise -ENOMEM
3878 */
3879 static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3880 {
3881 int i;
3882 u32 phys_addr;
3883
3884 /* Allocate page sized buffers for the receive buffer list */
3885
3886 for ( i = 0; i < Buffercount; i++ ) {
3887 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3888 /* PCI adapter uses shared memory buffers. */
3889 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3890 phys_addr = info->last_mem_alloc;
3891 info->last_mem_alloc += DMABUFFERSIZE;
3892 } else {
3893 /* ISA adapter uses system memory. */
3894 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3895 if (BufferList[i].virt_addr == NULL)
3896 return -ENOMEM;
3897 phys_addr = (u32)(BufferList[i].dma_addr);
3898 }
3899 BufferList[i].phys_addr = phys_addr;
3900 }
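	/* On the PCI path each frame buffer is carved out of shared memory by
	 * advancing last_mem_alloc by DMABUFFERSIZE, so consecutive buffer
	 * entries reference consecutive regions of shared memory immediately
	 * following the buffer list.
	 */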
3901
3902 return 0;
3903
3904 } /* end of mgsl_alloc_frame_memory() */
3905
3906 /*
3907 * mgsl_free_frame_memory()
3908 *
3909 * Free the buffers associated with
3910 * each buffer entry of a buffer list.
3911 *
3912 * Arguments:
3913 *
3914 * info pointer to device instance data
3915 * BufferList pointer to list of buffer entries
3916 * Buffercount count of buffer entries in buffer list
3917 *
3918 * Return Value: None
3919 */
3920 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3921 {
3922 int i;
3923
3924 if ( BufferList ) {
3925 for ( i = 0 ; i < Buffercount ; i++ ) {
3926 if ( BufferList[i].virt_addr ) {
3927 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3928 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3929 BufferList[i].virt_addr = NULL;
3930 }
3931 }
3932 }
3933
3934 } /* end of mgsl_free_frame_memory() */
3935
3936 /* mgsl_free_dma_buffers()
3937 *
3938 * Free DMA buffers
3939 *
3940 * Arguments: info pointer to device instance data
3941 * Return Value: None
3942 */
3943 static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3944 {
3945 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3946 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3947 mgsl_free_buffer_list_memory( info );
3948
3949 } /* end of mgsl_free_dma_buffers() */
3950
3951
3952 /*
3953 * mgsl_alloc_intermediate_rxbuffer_memory()
3954 *
3955 * Allocate a buffer large enough to hold max_frame_size. This buffer
3956 * is used to pass an assembled frame to the line discipline.
3957 *
3958 * Arguments:
3959 *
3960 * info pointer to device instance data
3961 *
3962 * Return Value: 0 if success, otherwise -ENOMEM
3963 */
3964 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3965 {
3966 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3967 if ( info->intermediate_rxbuffer == NULL )
3968 return -ENOMEM;
3969
3970 return 0;
3971
3972 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3973
3974 /*
3975 * mgsl_free_intermediate_rxbuffer_memory()
3976 *
3977  * Free the intermediate receive buffer.
3978 * Arguments:
3979 *
3980 * info pointer to device instance data
3981 *
3982 * Return Value: None
3983 */
3984 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3985 {
3986 kfree(info->intermediate_rxbuffer);
3987 info->intermediate_rxbuffer = NULL;
3988
3989 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
3990
3991 /*
3992 * mgsl_alloc_intermediate_txbuffer_memory()
3993 *
3994  * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3995  * These buffers hold transmit frames until they can be loaded into the
3996  * adapter's dma transfer buffers when sufficient space is available.
3997 *
3998 * Arguments:
3999 *
4000 * info pointer to device instance data
4001 *
4002 * Return Value: 0 if success, otherwise -ENOMEM
4003 */
4004 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4005 {
4006 int i;
4007
4008 if ( debug_level >= DEBUG_LEVEL_INFO )
4009 printk("%s %s(%d) allocating %d tx holding buffers\n",
4010 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4011
4012 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4013
4014 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4015 info->tx_holding_buffers[i].buffer =
4016 kmalloc(info->max_frame_size, GFP_KERNEL);
4017 if ( info->tx_holding_buffers[i].buffer == NULL )
4018 return -ENOMEM;
4019 }
4020
4021 return 0;
4022
4023 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4024
4025 /*
4026 * mgsl_free_intermediate_txbuffer_memory()
4027 *
4028  * Free the intermediate transmit holding buffers and reset the holding buffer state.
4029 * Arguments:
4030 *
4031 * info pointer to device instance data
4032 *
4033 * Return Value: None
4034 */
4035 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4036 {
4037 int i;
4038
4039 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4040 kfree(info->tx_holding_buffers[i].buffer);
4041 info->tx_holding_buffers[i].buffer = NULL;
4042 }
4043
4044 info->get_tx_holding_index = 0;
4045 info->put_tx_holding_index = 0;
4046 info->tx_holding_count = 0;
4047
4048 } /* end of mgsl_free_intermediate_txbuffer_memory() */
4049
4050
4051 /*
4052 * load_next_tx_holding_buffer()
4053 *
4054 * attempts to load the next buffered tx request into the
4055 * tx dma buffers
4056 *
4057 * Arguments:
4058 *
4059 * info pointer to device instance data
4060 *
4061 * Return Value: 1 if next buffered tx request loaded
4062 * into adapter's tx dma buffer,
4063 * 0 otherwise
4064 */
4065 static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4066 {
4067 int ret = 0;
4068
4069 if ( info->tx_holding_count ) {
4070 /* determine if we have enough tx dma buffers
4071 * to accommodate the next tx frame
4072 */
4073 struct tx_holding_buffer *ptx =
4074 &info->tx_holding_buffers[info->get_tx_holding_index];
4075 int num_free = num_free_tx_dma_buffers(info);
4076 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4077 if ( ptx->buffer_size % DMABUFFERSIZE )
4078 ++num_needed;
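		/* Worked example (illustrative; DMABUFFERSIZE assumed to be 4096):
		 * a 6000 byte buffered frame needs 6000/4096 = 1 buffer plus 1 for
		 * the remainder, so num_needed = 2 and the frame is loaded only if
		 * at least 2 tx dma buffers are free.
		 */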
4079
4080 if (num_needed <= num_free) {
4081 info->xmit_cnt = ptx->buffer_size;
4082 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4083
4084 --info->tx_holding_count;
4085 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4086 info->get_tx_holding_index=0;
4087
4088 /* restart transmit timer */
4089 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4090
4091 ret = 1;
4092 }
4093 }
4094
4095 return ret;
4096 }
4097
4098 /*
4099 * save_tx_buffer_request()
4100 *
4101 * attempt to store transmit frame request for later transmission
4102 *
4103 * Arguments:
4104 *
4105 * info pointer to device instance data
4106 * Buffer pointer to buffer containing frame to load
4107 * BufferSize size in bytes of frame in Buffer
4108 *
4109 * Return Value: 1 if able to store, 0 otherwise
4110 */
4111 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4112 {
4113 struct tx_holding_buffer *ptx;
4114
4115 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4116 return 0; /* all buffers in use */
4117 }
4118
4119 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4120 ptx->buffer_size = BufferSize;
4121 memcpy( ptx->buffer, Buffer, BufferSize);
4122
4123 ++info->tx_holding_count;
4124 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4125 info->put_tx_holding_index=0;
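	/* Example (illustrative): with num_tx_holding_buffers = 3 the put
	 * index cycles 0, 1, 2, 0, ... while tx_holding_count tracks how many
	 * holding buffers are occupied; load_next_tx_holding_buffer() consumes
	 * entries in the same order using get_tx_holding_index.
	 */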
4126
4127 return 1;
4128 }
4129
4130 static int mgsl_claim_resources(struct mgsl_struct *info)
4131 {
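	/* Resources are claimed in this order: I/O region, IRQ, then either the
	 * PCI shared memory and LCR memory regions (mapped and tested) or the
	 * ISA DMA channel, and finally the DMA buffer memory. On any failure
	 * everything already claimed is released via mgsl_release_resources().
	 */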
4132 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4133 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4134 __FILE__,__LINE__,info->device_name, info->io_base);
4135 return -ENODEV;
4136 }
4137 info->io_addr_requested = 1;
4138
4139 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4140 info->device_name, info ) < 0 ) {
4141 		printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4142 __FILE__,__LINE__,info->device_name, info->irq_level );
4143 goto errout;
4144 }
4145 info->irq_requested = 1;
4146
4147 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4148 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4149 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4150 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4151 goto errout;
4152 }
4153 info->shared_mem_requested = 1;
4154 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4155 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4156 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4157 goto errout;
4158 }
4159 info->lcr_mem_requested = 1;
4160
4161 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4162 if (!info->memory_base) {
4163 			printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4164 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4165 goto errout;
4166 }
4167
4168 if ( !mgsl_memory_test(info) ) {
4169 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4170 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4171 goto errout;
4172 }
4173
4174 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
4175 if (!info->lcr_base) {
4176 			printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4177 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4178 goto errout;
4179 }
4180
4181 } else {
4182 /* claim DMA channel */
4183
4184 if (request_dma(info->dma_level,info->device_name) < 0){
4185 			printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4186 __FILE__,__LINE__,info->device_name, info->dma_level );
4187 mgsl_release_resources( info );
4188 return -ENODEV;
4189 }
4190 info->dma_requested = 1;
4191
4192 /* ISA adapter uses bus master DMA */
4193 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4194 enable_dma(info->dma_level);
4195 }
4196
4197 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4198 		printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4199 __FILE__,__LINE__,info->device_name, info->dma_level );
4200 goto errout;
4201 }
4202
4203 return 0;
4204 errout:
4205 mgsl_release_resources(info);
4206 return -ENODEV;
4207
4208 } /* end of mgsl_claim_resources() */
4209
4210 static void mgsl_release_resources(struct mgsl_struct *info)
4211 {
4212 if ( debug_level >= DEBUG_LEVEL_INFO )
4213 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4214 __FILE__,__LINE__,info->device_name );
4215
4216 if ( info->irq_requested ) {
4217 free_irq(info->irq_level, info);
4218 info->irq_requested = 0;
4219 }
4220 if ( info->dma_requested ) {
4221 disable_dma(info->dma_level);
4222 free_dma(info->dma_level);
4223 info->dma_requested = 0;
4224 }
4225 mgsl_free_dma_buffers(info);
4226 mgsl_free_intermediate_rxbuffer_memory(info);
4227 mgsl_free_intermediate_txbuffer_memory(info);
4228
4229 if ( info->io_addr_requested ) {
4230 release_region(info->io_base,info->io_addr_size);
4231 info->io_addr_requested = 0;
4232 }
4233 if ( info->shared_mem_requested ) {
4234 release_mem_region(info->phys_memory_base,0x40000);
4235 info->shared_mem_requested = 0;
4236 }
4237 if ( info->lcr_mem_requested ) {
4238 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4239 info->lcr_mem_requested = 0;
4240 }
4241 if (info->memory_base){
4242 iounmap(info->memory_base);
4243 info->memory_base = NULL;
4244 }
4245 if (info->lcr_base){
4246 iounmap(info->lcr_base - info->lcr_offset);
4247 info->lcr_base = NULL;
4248 }
4249
4250 if ( debug_level >= DEBUG_LEVEL_INFO )
4251 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4252 __FILE__,__LINE__,info->device_name );
4253
4254 } /* end of mgsl_release_resources() */
4255
4256 /* mgsl_add_device()
4257 *
4258 * Add the specified device instance data structure to the
4259 * global linked list of devices and increment the device count.
4260 *
4261 * Arguments: info pointer to device instance data
4262 * Return Value: None
4263 */
4264 static void mgsl_add_device( struct mgsl_struct *info )
4265 {
4266 info->next_device = NULL;
4267 info->line = mgsl_device_count;
4268 sprintf(info->device_name,"ttySL%d",info->line);
4269
4270 if (info->line < MAX_TOTAL_DEVICES) {
4271 if (maxframe[info->line])
4272 info->max_frame_size = maxframe[info->line];
4273 info->dosyncppp = dosyncppp[info->line];
4274
4275 if (txdmabufs[info->line]) {
4276 info->num_tx_dma_buffers = txdmabufs[info->line];
4277 if (info->num_tx_dma_buffers < 1)
4278 info->num_tx_dma_buffers = 1;
4279 }
4280
4281 if (txholdbufs[info->line]) {
4282 info->num_tx_holding_buffers = txholdbufs[info->line];
4283 if (info->num_tx_holding_buffers < 1)
4284 info->num_tx_holding_buffers = 1;
4285 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4286 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4287 }
4288 }
4289
4290 mgsl_device_count++;
4291
4292 if ( !mgsl_device_list )
4293 mgsl_device_list = info;
4294 else {
4295 struct mgsl_struct *current_dev = mgsl_device_list;
4296 while( current_dev->next_device )
4297 current_dev = current_dev->next_device;
4298 current_dev->next_device = info;
4299 }
4300
4301 if ( info->max_frame_size < 4096 )
4302 info->max_frame_size = 4096;
4303 else if ( info->max_frame_size > 65535 )
4304 info->max_frame_size = 65535;
4305
4306 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4307 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4308 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4309 info->phys_memory_base, info->phys_lcr_base,
4310 info->max_frame_size );
4311 } else {
4312 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4313 info->device_name, info->io_base, info->irq_level, info->dma_level,
4314 info->max_frame_size );
4315 }
4316
4317 #ifdef CONFIG_HDLC
4318 hdlcdev_init(info);
4319 #endif
4320
4321 } /* end of mgsl_add_device() */
4322
4323 /* mgsl_allocate_device()
4324 *
4325 * Allocate and initialize a device instance structure
4326 *
4327 * Arguments: none
4328 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4329 */
4330 static struct mgsl_struct* mgsl_allocate_device(void)
4331 {
4332 struct mgsl_struct *info;
4333
4334 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4335 GFP_KERNEL);
4336
4337 if (!info) {
4338 printk("Error can't allocate device instance data\n");
4339 } else {
4340 memset(info, 0, sizeof(struct mgsl_struct));
4341 info->magic = MGSL_MAGIC;
4342 INIT_WORK(&info->task, mgsl_bh_handler, info);
4343 info->max_frame_size = 4096;
4344 info->close_delay = 5*HZ/10;
4345 info->closing_wait = 30*HZ;
4346 init_waitqueue_head(&info->open_wait);
4347 init_waitqueue_head(&info->close_wait);
4348 init_waitqueue_head(&info->status_event_wait_q);
4349 init_waitqueue_head(&info->event_wait_q);
4350 spin_lock_init(&info->irq_spinlock);
4351 spin_lock_init(&info->netlock);
4352 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4353 info->idle_mode = HDLC_TXIDLE_FLAGS;
4354 info->num_tx_dma_buffers = 1;
4355 info->num_tx_holding_buffers = 0;
4356 }
4357
4358 return info;
4359
4360 } /* end of mgsl_allocate_device()*/
4361
4362 static struct tty_operations mgsl_ops = {
4363 .open = mgsl_open,
4364 .close = mgsl_close,
4365 .write = mgsl_write,
4366 .put_char = mgsl_put_char,
4367 .flush_chars = mgsl_flush_chars,
4368 .write_room = mgsl_write_room,
4369 .chars_in_buffer = mgsl_chars_in_buffer,
4370 .flush_buffer = mgsl_flush_buffer,
4371 .ioctl = mgsl_ioctl,
4372 .throttle = mgsl_throttle,
4373 .unthrottle = mgsl_unthrottle,
4374 .send_xchar = mgsl_send_xchar,
4375 .break_ctl = mgsl_break,
4376 .wait_until_sent = mgsl_wait_until_sent,
4377 .read_proc = mgsl_read_proc,
4378 .set_termios = mgsl_set_termios,
4379 .stop = mgsl_stop,
4380 .start = mgsl_start,
4381 .hangup = mgsl_hangup,
4382 .tiocmget = tiocmget,
4383 .tiocmset = tiocmset,
4384 };
4385
4386 /*
4387 * perform tty device initialization
4388 */
4389 static int mgsl_init_tty(void)
4390 {
4391 int rc;
4392
4393 serial_driver = alloc_tty_driver(128);
4394 if (!serial_driver)
4395 return -ENOMEM;
4396
4397 serial_driver->owner = THIS_MODULE;
4398 serial_driver->driver_name = "synclink";
4399 serial_driver->name = "ttySL";
4400 serial_driver->major = ttymajor;
4401 serial_driver->minor_start = 64;
4402 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4403 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4404 serial_driver->init_termios = tty_std_termios;
4405 serial_driver->init_termios.c_cflag =
4406 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4407 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4408 tty_set_operations(serial_driver, &mgsl_ops);
4409 if ((rc = tty_register_driver(serial_driver)) < 0) {
4410 printk("%s(%d):Couldn't register serial driver\n",
4411 __FILE__,__LINE__);
4412 put_tty_driver(serial_driver);
4413 serial_driver = NULL;
4414 return rc;
4415 }
4416
4417 printk("%s %s, tty major#%d\n",
4418 driver_name, driver_version,
4419 serial_driver->major);
4420 return 0;
4421 }
4422
4423 /* enumerate user specified ISA adapters
4424 */
4425 static void mgsl_enum_isa_devices(void)
4426 {
4427 struct mgsl_struct *info;
4428 int i;
4429
4430 /* Check for user specified ISA devices */
4431
4432 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4433 if ( debug_level >= DEBUG_LEVEL_INFO )
4434 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4435 io[i], irq[i], dma[i] );
4436
4437 info = mgsl_allocate_device();
4438 if ( !info ) {
4439 /* error allocating device instance data */
4440 if ( debug_level >= DEBUG_LEVEL_ERROR )
4441 printk( "can't allocate device instance data.\n");
4442 continue;
4443 }
4444
4445 /* Copy user configuration info to device instance data */
4446 info->io_base = (unsigned int)io[i];
4447 info->irq_level = (unsigned int)irq[i];
4448 info->irq_level = irq_canonicalize(info->irq_level);
4449 info->dma_level = (unsigned int)dma[i];
4450 info->bus_type = MGSL_BUS_TYPE_ISA;
4451 info->io_addr_size = 16;
4452 info->irq_flags = 0;
4453
4454 mgsl_add_device( info );
4455 }
4456 }
4457
4458 static void synclink_cleanup(void)
4459 {
4460 int rc;
4461 struct mgsl_struct *info;
4462 struct mgsl_struct *tmp;
4463
4464 printk("Unloading %s: %s\n", driver_name, driver_version);
4465
4466 if (serial_driver) {
4467 if ((rc = tty_unregister_driver(serial_driver)))
4468 printk("%s(%d) failed to unregister tty driver err=%d\n",
4469 __FILE__,__LINE__,rc);
4470 put_tty_driver(serial_driver);
4471 }
4472
4473 info = mgsl_device_list;
4474 while(info) {
4475 #ifdef CONFIG_HDLC
4476 hdlcdev_exit(info);
4477 #endif
4478 mgsl_release_resources(info);
4479 tmp = info;
4480 info = info->next_device;
4481 kfree(tmp);
4482 }
4483
4484 if (pci_registered)
4485 pci_unregister_driver(&synclink_pci_driver);
4486 }
4487
4488 static int __init synclink_init(void)
4489 {
4490 int rc;
4491
4492 if (break_on_load) {
4493 mgsl_get_text_ptr();
4494 BREAKPOINT();
4495 }
4496
4497 printk("%s %s\n", driver_name, driver_version);
4498
4499 mgsl_enum_isa_devices();
4500 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4501 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4502 else
4503 pci_registered = 1;
4504
4505 if ((rc = mgsl_init_tty()) < 0)
4506 goto error;
4507
4508 return 0;
4509
4510 error:
4511 synclink_cleanup();
4512 return rc;
4513 }
4514
4515 static void __exit synclink_exit(void)
4516 {
4517 synclink_cleanup();
4518 }
4519
4520 module_init(synclink_init);
4521 module_exit(synclink_exit);
4522
4523 /*
4524 * usc_RTCmd()
4525 *
4526 * Issue a USC Receive/Transmit command to the
4527 * Channel Command/Address Register (CCAR).
4528 *
4529 * Notes:
4530 *
4531 * The command is encoded in the most significant 5 bits <15..11>
4532 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4533 * and Bits <6..0> must be written as zeros.
4534 *
4535 * Arguments:
4536 *
4537 * info pointer to device information structure
4538 * Cmd command mask (use symbolic macros)
4539 *
4540 * Return Value:
4541 *
4542 * None
4543 */
4544 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4545 {
4546 /* output command to CCAR in bits <15..11> */
4547 /* preserve bits <10..7>, bits <6..0> must be zero */
4548
4549 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4550
4551 /* Read to flush write to CCAR */
4552 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4553 inw( info->io_base + CCAR );
4554
4555 } /* end of usc_RTCmd() */
4556
4557 /*
4558 * usc_DmaCmd()
4559 *
4560 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4561 *
4562 * Arguments:
4563 *
4564 * info pointer to device information structure
4565 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4566 *
4567 * Return Value:
4568 *
4569 * None
4570 */
4571 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4572 {
4573 /* write command mask to DCAR */
4574 outw( Cmd + info->mbre_bit, info->io_base );
4575
4576 /* Read to flush write to DCAR */
4577 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4578 inw( info->io_base );
4579
4580 } /* end of usc_DmaCmd() */
4581
4582 /*
4583 * usc_OutDmaReg()
4584 *
4585 * Write a 16-bit value to a USC DMA register
4586 *
4587 * Arguments:
4588 *
4589 * info pointer to device info structure
4590 * RegAddr register address (number) for write
4591 * RegValue 16-bit value to write to register
4592 *
4593 * Return Value:
4594 *
4595 * None
4596 *
4597 */
4598 static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4599 {
4600 /* Note: The DCAR is located at the adapter base address */
4601 /* Note: must preserve state of BIT8 in DCAR */
4602
4603 outw( RegAddr + info->mbre_bit, info->io_base );
4604 outw( RegValue, info->io_base );
4605
4606 /* Read to flush write to DCAR */
4607 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4608 inw( info->io_base );
4609
4610 } /* end of usc_OutDmaReg() */
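/* Example usage, as done later in usc_set_sdlc_mode():
 *
 *	usc_OutDmaReg( info, RDMR, 0xf200 );
 *
 * This writes the register number (plus the preserved BIT8 state in
 * mbre_bit) to the DCAR at the adapter base address, then writes the
 * 16-bit value.
 */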
4611
4612 /*
4613 * usc_InDmaReg()
4614 *
4615 * Read a 16-bit value from a DMA register
4616 *
4617 * Arguments:
4618 *
4619 * info pointer to device info structure
4620 * RegAddr register address (number) to read from
4621 *
4622 * Return Value:
4623 *
4624 * The 16-bit value read from register
4625 *
4626 */
4627 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4628 {
4629 /* Note: The DCAR is located at the adapter base address */
4630 /* Note: must preserve state of BIT8 in DCAR */
4631
4632 outw( RegAddr + info->mbre_bit, info->io_base );
4633 return inw( info->io_base );
4634
4635 } /* end of usc_InDmaReg() */
4636
4637 /*
4638 *
4639 * usc_OutReg()
4640 *
4641 * Write a 16-bit value to a USC serial channel register
4642 *
4643 * Arguments:
4644 *
4645 * info pointer to device info structure
4646 * RegAddr register address (number) to write to
4647 * RegValue 16-bit value to write to register
4648 *
4649 * Return Value:
4650 *
4651 * None
4652 *
4653 */
4654 static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4655 {
4656 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4657 outw( RegValue, info->io_base + CCAR );
4658
4659 /* Read to flush write to CCAR */
4660 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4661 inw( info->io_base + CCAR );
4662
4663 } /* end of usc_OutReg() */
4664
4665 /*
4666 * usc_InReg()
4667 *
4668 * Reads a 16-bit value from a USC serial channel register
4669 *
4670 * Arguments:
4671 *
4672 * info pointer to device extension
4673 * RegAddr register address (number) to read from
4674 *
4675 * Return Value:
4676 *
4677 * 16-bit value read from register
4678 */
4679 static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4680 {
4681 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4682 return inw( info->io_base + CCAR );
4683
4684 } /* end of usc_InReg() */
4685
4686 /* usc_set_sdlc_mode()
4687 *
4688 * Set up the adapter for SDLC DMA communications.
4689 *
4690 * Arguments: info pointer to device instance data
4691 * Return Value: NONE
4692 */
4693 static void usc_set_sdlc_mode( struct mgsl_struct *info )
4694 {
4695 u16 RegValue;
4696 int PreSL1660;
4697
4698 /*
4699 * determine if the IUSC on the adapter is pre-SL1660. If
4700 * not, take advantage of the UnderWait feature of more
4701 * modern chips. If an underrun occurs and this bit is set,
4702 * the transmitter will idle the programmed idle pattern
4703 * until the driver has time to service the underrun. Otherwise,
4704 * the dma controller may get the cycles previously requested
4705 * and begin transmitting queued tx data.
4706 */
4707 usc_OutReg(info,TMCR,0x1f);
4708 RegValue=usc_InReg(info,TMDR);
4709 if ( RegValue == IUSC_PRE_SL1660 )
4710 PreSL1660 = 1;
4711 else
4712 PreSL1660 = 0;
4713
4714
4715 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4716 {
4717 /*
4718 ** Channel Mode Register (CMR)
4719 **
4720 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4721 ** <13> 0 0 = Transmit Disabled (initially)
4722 ** <12> 0 1 = Consecutive Idles share common 0
4723 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4724 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4725 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4726 **
4727 ** 1000 1110 0000 0110 = 0x8e06
4728 */
4729 RegValue = 0x8e06;
4730
4731 /*--------------------------------------------------
4732 * ignore user options for UnderRun Actions and
4733 * preambles
4734 *--------------------------------------------------*/
4735 }
4736 else
4737 {
4738 /* Channel mode Register (CMR)
4739 *
4740 * <15..14> 00 Tx Sub modes, Underrun Action
4741 * <13> 0 1 = Send Preamble before opening flag
4742 * <12> 0 1 = Consecutive Idles share common 0
4743 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4744 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4745 * <3..0> 0110 Receiver mode = HDLC/SDLC
4746 *
4747 * 0000 0110 0000 0110 = 0x0606
4748 */
4749 if (info->params.mode == MGSL_MODE_RAW) {
4750 RegValue = 0x0001; /* Set Receive mode = external sync */
4751
4752 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4753 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4754
4755 /*
4756 * TxSubMode:
4757 * CMR <15> 0 Don't send CRC on Tx Underrun
4758 * CMR <14> x undefined
4759 		 * CMR <13>	0	Send preamble before opening sync
4760 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4761 *
4762 * TxMode:
4763 		 * CMR <11..8>	0100	MonoSync
4764 *
4765 		 * 0x00 0100 xxxx xxxx = 0x04xx
4766 */
4767 RegValue |= 0x0400;
4768 }
4769 else {
4770
4771 RegValue = 0x0606;
4772
4773 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4774 RegValue |= BIT14;
4775 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4776 RegValue |= BIT15;
4777 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4778 RegValue |= BIT15 + BIT14;
4779 }
4780
4781 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4782 RegValue |= BIT13;
4783 }
4784
4785 if ( info->params.mode == MGSL_MODE_HDLC &&
4786 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4787 RegValue |= BIT12;
4788
4789 if ( info->params.addr_filter != 0xff )
4790 {
4791 /* set up receive address filtering */
4792 usc_OutReg( info, RSR, info->params.addr_filter );
4793 RegValue |= BIT4;
4794 }
4795
4796 usc_OutReg( info, CMR, RegValue );
4797 info->cmr_value = RegValue;
4798
4799 /* Receiver mode Register (RMR)
4800 *
4801 * <15..13> 000 encoding
4802 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4803 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4804 * <9> 0 1 = Include Receive chars in CRC
4805 * <8> 1 1 = Use Abort/PE bit as abort indicator
4806 * <7..6> 00 Even parity
4807 * <5> 0 parity disabled
4808 * <4..2> 000 Receive Char Length = 8 bits
4809 * <1..0> 00 Disable Receiver
4810 *
4811 * 0000 0101 0000 0000 = 0x0500
4812 */
4813
4814 RegValue = 0x0500;
4815
4816 switch ( info->params.encoding ) {
4817 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4818 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4819 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4820 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4821 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4822 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4823 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4824 }
4825
4826 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4827 RegValue |= BIT9;
4828 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4829 RegValue |= ( BIT12 | BIT10 | BIT9 );
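	/* Example: NRZ encoding adds no bits in the switch above, so with
	 * HDLC_CRC_16_CCITT the receiver mode value written below is
	 * RMR = 0x0500 | BIT9 = 0x0700.
	 */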
4830
4831 usc_OutReg( info, RMR, RegValue );
4832
4833 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4834 /* When an opening flag of an SDLC frame is recognized the */
4835 /* Receive Character count (RCC) is loaded with the value in */
4836 /* RCLR. The RCC is decremented for each received byte. The */
4837 /* value of RCC is stored after the closing flag of the frame */
4838 /* allowing the frame size to be computed. */
4839
4840 usc_OutReg( info, RCLR, RCLRVALUE );
4841
4842 usc_RCmd( info, RCmd_SelectRicrdma_level );
4843
4844 /* Receive Interrupt Control Register (RICR)
4845 *
4846 * <15..8> ? RxFIFO DMA Request Level
4847 * <7> 0 Exited Hunt IA (Interrupt Arm)
4848 * <6> 0 Idle Received IA
4849 * <5> 0 Break/Abort IA
4850 * <4> 0 Rx Bound IA
4851 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4852 * <2> 0 Abort/PE IA
4853 * <1> 1 Rx Overrun IA
4854 * <0> 0 Select TC0 value for readback
4855 *
4856 	 * 0000 0000 0000 1010 = 0x000a
4857 */
4858
4859 /* Carry over the Exit Hunt and Idle Received bits */
4860 /* in case they have been armed by usc_ArmEvents. */
4861
4862 RegValue = usc_InReg( info, RICR ) & 0xc0;
4863
4864 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4865 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4866 else
4867 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4868
4869 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4870
4871 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4872 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4873
4874 /* Transmit mode Register (TMR)
4875 *
4876 * <15..13> 000 encoding
4877 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4878 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4879 * <9> 0 1 = Tx CRC Enabled
4880 * <8> 0 1 = Append CRC to end of transmit frame
4881 * <7..6> 00 Transmit parity Even
4882 * <5> 0 Transmit parity Disabled
4883 * <4..2> 000 Tx Char Length = 8 bits
4884 * <1..0> 00 Disable Transmitter
4885 *
4886 * 0000 0100 0000 0000 = 0x0400
4887 */
4888
4889 RegValue = 0x0400;
4890
4891 switch ( info->params.encoding ) {
4892 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4893 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4894 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4895 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4896 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4897 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4898 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4899 }
4900
4901 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4902 RegValue |= BIT9 + BIT8;
4903 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4904 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4905
4906 usc_OutReg( info, TMR, RegValue );
4907
4908 usc_set_txidle( info );
4909
4910
4911 usc_TCmd( info, TCmd_SelectTicrdma_level );
4912
4913 /* Transmit Interrupt Control Register (TICR)
4914 *
4915 * <15..8> ? Transmit FIFO DMA Level
4916 * <7> 0 Present IA (Interrupt Arm)
4917 * <6> 0 Idle Sent IA
4918 * <5> 1 Abort Sent IA
4919 * <4> 1 EOF/EOM Sent IA
4920 * <3> 0 CRC Sent IA
4921 * <2> 1 1 = Wait for SW Trigger to Start Frame
4922 * <1> 1 Tx Underrun IA
4923 * <0> 0 TC0 constant on read back
4924 *
4925 * 0000 0000 0011 0110 = 0x0036
4926 */
4927
4928 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4929 usc_OutReg( info, TICR, 0x0736 );
4930 else
4931 usc_OutReg( info, TICR, 0x1436 );
4932
4933 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4934 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4935
4936 /*
4937 ** Transmit Command/Status Register (TCSR)
4938 **
4939 ** <15..12> 0000 TCmd
4940 ** <11> 0/1 UnderWait
4941 ** <10..08> 000 TxIdle
4942 ** <7> x PreSent
4943 ** <6> x IdleSent
4944 ** <5> x AbortSent
4945 ** <4> x EOF/EOM Sent
4946 ** <3> x CRC Sent
4947 ** <2> x All Sent
4948 ** <1> x TxUnder
4949 ** <0> x TxEmpty
4950 **
4951 ** 0000 0000 0000 0000 = 0x0000
4952 */
4953 info->tcsr_value = 0;
4954
4955 if ( !PreSL1660 )
4956 info->tcsr_value |= TCSR_UNDERWAIT;
4957
4958 usc_OutReg( info, TCSR, info->tcsr_value );
4959
4960 /* Clock mode Control Register (CMCR)
4961 *
4962 * <15..14> 00 counter 1 Source = Disabled
4963 * <13..12> 00 counter 0 Source = Disabled
4964 * <11..10> 11 BRG1 Input is TxC Pin
4965 * <9..8> 11 BRG0 Input is TxC Pin
4966 * <7..6> 01 DPLL Input is BRG1 Output
4967 * <5..3> XXX TxCLK comes from Port 0
4968 * <2..0> XXX RxCLK comes from Port 1
4969 *
4970 * 0000 1111 0111 0111 = 0x0f77
4971 */
4972
4973 RegValue = 0x0f40;
4974
4975 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4976 RegValue |= 0x0003; /* RxCLK from DPLL */
4977 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4978 RegValue |= 0x0004; /* RxCLK from BRG0 */
4979 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4980 RegValue |= 0x0006; /* RxCLK from TXC Input */
4981 else
4982 RegValue |= 0x0007; /* RxCLK from Port1 */
4983
4984 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4985 RegValue |= 0x0018; /* TxCLK from DPLL */
4986 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4987 RegValue |= 0x0020; /* TxCLK from BRG0 */
4988 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4989 		RegValue |= 0x0038;	/* TxCLK from RXC Input */
4990 else
4991 RegValue |= 0x0030; /* TxCLK from Port0 */
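	/* Example: with none of the RXC/TXC clock flags set, the defaults above
	 * give RegValue = 0x0f40 | 0x0007 | 0x0030 = 0x0f77 (RxCLK from Port1,
	 * TxCLK from Port0), matching the value shown in the register comment.
	 */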
4992
4993 usc_OutReg( info, CMCR, RegValue );
4994
4995
4996 /* Hardware Configuration Register (HCR)
4997 *
4998 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4999 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
5000 * <12> 0 CVOK:0=report code violation in biphase
5001 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5002 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5003 * <7..6> 00 reserved
5004 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5005 * <4> X BRG1 Enable
5006 * <3..2> 00 reserved
5007 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5008 * <0> 0 BRG0 Enable
5009 */
5010
5011 RegValue = 0x0000;
5012
5013 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5014 u32 XtalSpeed;
5015 u32 DpllDivisor;
5016 u16 Tc;
5017
5018 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5019 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5020
5021 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5022 XtalSpeed = 11059200;
5023 else
5024 XtalSpeed = 14745600;
5025
5026 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5027 DpllDivisor = 16;
5028 RegValue |= BIT10;
5029 }
5030 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5031 DpllDivisor = 8;
5032 RegValue |= BIT11;
5033 }
5034 else
5035 DpllDivisor = 32;
5036
5037 /* Tc = (Xtal/Speed) - 1 */
5038 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5039 /* then rounding up gives a more precise time constant. Instead */
5040 /* of rounding up and then subtracting 1 we just don't subtract */
5041 /* the one in this case. */
5042
5043 /*--------------------------------------------------
5044 * ejz: for DPLL mode, application should use the
5045 * same clock speed as the partner system, even
5046 * though clocking is derived from the input RxData.
5047 * In case the user uses a 0 for the clock speed,
5048 * default to 0xffffffff and don't try to divide by
5049 * zero
5050 *--------------------------------------------------*/
5051 if ( info->params.clock_speed )
5052 {
5053 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5054 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5055 / info->params.clock_speed) )
5056 Tc--;
5057 }
5058 else
5059 Tc = -1;
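		/* Worked example (illustrative): an ISA adapter (XtalSpeed =
		 * 14745600) with the default DpllDivisor of 32 gives a reference
		 * of 14745600/32 = 460800 Hz; for clock_speed = 9600 the division
		 * is exact (460800/9600 = 48, remainder 0), so Tc = 48 - 1 = 47.
		 */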
5060
5061
5062 /* Write 16-bit Time Constant for BRG1 */
5063 usc_OutReg( info, TC1R, Tc );
5064
5065 RegValue |= BIT4; /* enable BRG1 */
5066
5067 switch ( info->params.encoding ) {
5068 case HDLC_ENCODING_NRZ:
5069 case HDLC_ENCODING_NRZB:
5070 case HDLC_ENCODING_NRZI_MARK:
5071 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5072 case HDLC_ENCODING_BIPHASE_MARK:
5073 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5074 case HDLC_ENCODING_BIPHASE_LEVEL:
5075 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5076 }
5077 }
5078
5079 usc_OutReg( info, HCR, RegValue );
5080
5081
5082 /* Channel Control/status Register (CCSR)
5083 *
5084 * <15> X RCC FIFO Overflow status (RO)
5085 * <14> X RCC FIFO Not Empty status (RO)
5086 * <13> 0 1 = Clear RCC FIFO (WO)
5087 * <12> X DPLL Sync (RW)
5088 * <11> X DPLL 2 Missed Clocks status (RO)
5089 * <10> X DPLL 1 Missed Clock status (RO)
5090 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5091 * <7> X SDLC Loop On status (RO)
5092 * <6> X SDLC Loop Send status (RO)
5093 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5094 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5095 * <1..0> 00 reserved
5096 *
5097 * 0000 0000 0010 0000 = 0x0020
5098 */
5099
5100 usc_OutReg( info, CCSR, 0x1020 );
5101
5102
5103 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5104 usc_OutReg( info, SICR,
5105 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5106 }
5107
5108
5109 /* enable Master Interrupt Enable bit (MIE) */
5110 usc_EnableMasterIrqBit( info );
5111
5112 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5113 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5114
5115 /* arm RCC underflow interrupt */
5116 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5117 usc_EnableInterrupts(info, MISC);
5118
5119 info->mbre_bit = 0;
5120 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5121 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5122 info->mbre_bit = BIT8;
5123 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5124
5125 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5126 /* Enable DMAEN (Port 7, Bit 14) */
5127 /* This connects the DMA request signal to the ISA bus */
5128 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5129 }
5130
5131 /* DMA Control Register (DCR)
5132 *
5133 * <15..14> 10 Priority mode = Alternating Tx/Rx
5134 * 01 Rx has priority
5135 * 00 Tx has priority
5136 *
5137 * <13> 1 Enable Priority Preempt per DCR<15..14>
5138 * (WARNING DCR<11..10> must be 00 when this is 1)
5139 * 0 Choose activate channel per DCR<11..10>
5140 *
5141 * <12> 0 Little Endian for Array/List
5142 * <11..10> 00 Both Channels can use each bus grant
5143 * <9..6> 0000 reserved
5144 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5145 * <4> 0 1 = drive D/C and S/D pins
5146 * <3> 1 1 = Add one wait state to all DMA cycles.
5147 * <2> 0 1 = Strobe /UAS on every transfer.
5148 * <1..0> 11 Addr incrementing only affects LS24 bits
5149 *
5150 * 0110 0000 0000 1011 = 0x600b
5151 */
5152
5153 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5154 /* PCI adapter does not need DMA wait state */
5155 usc_OutDmaReg( info, DCR, 0xa00b );
5156 }
5157 else
5158 usc_OutDmaReg( info, DCR, 0x800b );
5159
5160
5161 /* Receive DMA mode Register (RDMR)
5162 *
5163 * <15..14> 11 DMA mode = Linked List Buffer mode
5164 	 * <13>	1	RSBinA/L = store Rx status Block in Array/List entry
5165 * <12> 1 Clear count of List Entry after fetching
5166 * <11..10> 00 Address mode = Increment
5167 * <9> 1 Terminate Buffer on RxBound
5168 * <8> 0 Bus Width = 16bits
5169 * <7..0> ? status Bits (write as 0s)
5170 *
5171 * 1111 0010 0000 0000 = 0xf200
5172 */
5173
5174 usc_OutDmaReg( info, RDMR, 0xf200 );
5175
5176
5177 /* Transmit DMA mode Register (TDMR)
5178 *
5179 * <15..14> 11 DMA mode = Linked List Buffer mode
5180 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5181 * <12> 1 Clear count of List Entry after fetching
5182 * <11..10> 00 Address mode = Increment
5183 * <9> 1 Terminate Buffer on end of frame
5184 * <8> 0 Bus Width = 16bits
5185 * <7..0> ? status Bits (Read Only so write as 0)
5186 *
5187 * 1111 0010 0000 0000 = 0xf200
5188 */
5189
5190 usc_OutDmaReg( info, TDMR, 0xf200 );
5191
5192
5193 /* DMA Interrupt Control Register (DICR)
5194 *
5195 * <15> 1 DMA Interrupt Enable
5196 * <14> 0 1 = Disable IEO from USC
5197 * <13> 0 1 = Don't provide vector during IntAck
5198 * <12> 1 1 = Include status in Vector
5199 * <10..2> 0 reserved, Must be 0s
5200 * <1> 0 1 = Rx DMA Interrupt Enabled
5201 * <0> 0 1 = Tx DMA Interrupt Enabled
5202 *
5203 * 1001 0000 0000 0000 = 0x9000
5204 */
5205
5206 usc_OutDmaReg( info, DICR, 0x9000 );
5207
5208 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5209 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5210 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5211
5212 /* Channel Control Register (CCR)
5213 *
5214 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5215 * <13> 0 Trigger Tx on SW Command Disabled
5216 * <12> 0 Flag Preamble Disabled
5217 * <11..10> 00 Preamble Length
5218 * <9..8> 00 Preamble Pattern
5219 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5220 * <5> 0 Trigger Rx on SW Command Disabled
5221 * <4..0> 0 reserved
5222 *
5223 * 1000 0000 1000 0000 = 0x8080
5224 */
5225
5226 RegValue = 0x8080;
5227
5228 switch ( info->params.preamble_length ) {
5229 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5230 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5231 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5232 }
5233
5234 switch ( info->params.preamble ) {
5235 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5236 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5237 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5238 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5239 }
5240
5241 usc_OutReg( info, CCR, RegValue );
5242
5243
5244 /*
5245 * Burst/Dwell Control Register
5246 *
5247 * <15..8> 0x20 Maximum number of transfers per bus grant
5248 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5249 */
5250
5251 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5252 /* don't limit bus occupancy on PCI adapter */
5253 usc_OutDmaReg( info, BDCR, 0x0000 );
5254 }
5255 else
5256 usc_OutDmaReg( info, BDCR, 0x2000 );
5257
5258 usc_stop_transmitter(info);
5259 usc_stop_receiver(info);
5260
5261 } /* end of usc_set_sdlc_mode() */
5262
5263 /* usc_enable_loopback()
5264 *
5265 * Set the 16C32 for internal loopback mode.
5266 * The TxCLK and RxCLK signals are generated from the BRG0 and
5267 * the TxD is looped back to the RxD internally.
5268 *
5269 * Arguments: info pointer to device instance data
5270 * enable 1 = enable loopback, 0 = disable
5271 * Return Value: None
5272 */
5273 static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5274 {
5275 if (enable) {
5276 /* blank external TXD output */
5277 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5278
5279 /* Clock mode Control Register (CMCR)
5280 *
5281 * <15..14> 00 counter 1 Disabled
5282 * <13..12> 00 counter 0 Disabled
5283 * <11..10> 11 BRG1 Input is TxC Pin
5284 * <9..8> 11 BRG0 Input is TxC Pin
5285 * <7..6> 01 DPLL Input is BRG1 Output
5286 * <5..3> 100 TxCLK comes from BRG0
5287 * <2..0> 100 RxCLK comes from BRG0
5288 *
5289 * 0000 1111 0110 0100 = 0x0f64
5290 */
5291
5292 usc_OutReg( info, CMCR, 0x0f64 );
5293
5294 /* Write 16-bit Time Constant for BRG0 */
5295 /* use clock speed if available, otherwise use 8 for diagnostics */
5296 if (info->params.clock_speed) {
5297 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5298 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5299 else
5300 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5301 } else
5302 usc_OutReg(info, TC0R, (u16)8);
5303
5304 		/* Hardware Configuration Register (HCR): clear Bit 1 so BRG0
5305 		   mode = Continuous, and set Bit 0 to enable BRG0. */
5306 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5307
5308 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5309 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5310
5311 /* set Internal Data loopback mode */
5312 info->loopback_bits = 0x300;
5313 outw( 0x0300, info->io_base + CCAR );
5314 } else {
5315 /* enable external TXD output */
5316 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5317
5318 /* clear Internal Data loopback mode */
5319 info->loopback_bits = 0;
5320 outw( 0,info->io_base + CCAR );
5321 }
5322
5323 } /* end of usc_enable_loopback() */
5324
5325 /* usc_enable_aux_clock()
5326 *
5327  * Enable the AUX clock output at the specified frequency.
5328 *
5329 * Arguments:
5330 *
5331 * info pointer to device extension
5332 * data_rate data rate of clock in bits per second
5333 * A data rate of 0 disables the AUX clock.
5334 *
5335 * Return Value: None
5336 */
5337 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5338 {
5339 u32 XtalSpeed;
5340 u16 Tc;
5341
5342 if ( data_rate ) {
5343 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5344 XtalSpeed = 11059200;
5345 else
5346 XtalSpeed = 14745600;
5347
5348
5349 /* Tc = (Xtal/Speed) - 1 */
5350 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5351 /* then rounding up gives a more precise time constant. Instead */
5352 /* of rounding up and then subtracting 1 we just don't subtract */
5353 /* the one in this case. */
5354
5355
5356 Tc = (u16)(XtalSpeed/data_rate);
5357 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5358 Tc--;
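/*
 * Worked example of the rounding rule above (editorial illustration,
 * assuming the 14745600 Hz ISA crystal):
 *
 *   data_rate = 9600: 14745600/9600 = 1536 rem 0,
 *                     2*0 < 9600     -> Tc = 1536 - 1 = 1535
 *   data_rate = 7000: 14745600/7000 = 2106 rem 3600,
 *                     2*3600 >= 7000 -> Tc = 2106 (no -1)
 */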
5359
5360 /* Write 16-bit Time Constant for BRG0 */
5361 usc_OutReg( info, TC0R, Tc );
5362
5363 /*
5364 * Hardware Configuration Register (HCR)
5365 * Clear Bit 1, BRG0 mode = Continuous
5366 * Set Bit 0 to enable BRG0.
5367 */
5368
5369 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5370
5371 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5372 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5373 } else {
5374 /* data rate == 0 so turn off BRG0 */
5375 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5376 }
5377
5378 } /* end of usc_enable_aux_clock() */
5379
5380 /*
5381 *
5382 * usc_process_rxoverrun_sync()
5383 *
5384 * This function processes a receive overrun by resetting the
5385 * receive DMA buffers and issuing a Purge Rx FIFO command
5386 * to allow the receiver to continue receiving.
5387 *
5388 * Arguments:
5389 *
5390 * info pointer to device extension
5391 *
5392 * Return Value: None
5393 */
5394 static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5395 {
5396 int start_index;
5397 int end_index;
5398 int frame_start_index;
5399 int start_of_frame_found = FALSE;
5400 int end_of_frame_found = FALSE;
5401 int reprogram_dma = FALSE;
5402
5403 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5404 u32 phys_addr;
5405
5406 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5407 usc_RCmd( info, RCmd_EnterHuntmode );
5408 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5409
5410 	/* current_rx_buffer points to the 1st buffer of the next */
5411 /* possibly available receive frame. */
5412
5413 frame_start_index = start_index = end_index = info->current_rx_buffer;
5414
5415 /* Search for an unfinished string of buffers. This means */
5416 /* that a receive frame started (at least one buffer with */
5417 	/* count set to zero) but there is no terminating buffer */
5418 /* (status set to non-zero). */
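	/* Illustrative example (editorial addition): if buffers 2..5 have */
	/* count == 0, buffer 3 has status set (a complete frame in 2..3)  */
	/* and buffers 4..5 have status == 0, the loop below ends with     */
	/* frame_start_index == 4; buffers 4..5 are the unfinished string  */
	/* that is reset and reprogrammed further down.                    */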
5419
5420 while( !buffer_list[end_index].count )
5421 {
5422 /* Count field has been reset to zero by 16C32. */
5423 /* This buffer is currently in use. */
5424
5425 if ( !start_of_frame_found )
5426 {
5427 start_of_frame_found = TRUE;
5428 frame_start_index = end_index;
5429 end_of_frame_found = FALSE;
5430 }
5431
5432 if ( buffer_list[end_index].status )
5433 {
5434 /* Status field has been set by 16C32. */
5435 /* This is the last buffer of a received frame. */
5436
5437 /* We want to leave the buffers for this frame intact. */
5438 /* Move on to next possible frame. */
5439
5440 start_of_frame_found = FALSE;
5441 end_of_frame_found = TRUE;
5442 }
5443
5444 /* advance to next buffer entry in linked list */
5445 end_index++;
5446 if ( end_index == info->rx_buffer_count )
5447 end_index = 0;
5448
5449 if ( start_index == end_index )
5450 {
5451 /* The entire list has been searched with all Counts == 0 and */
5452 /* all Status == 0. The receive buffers are */
5453 /* completely screwed, reset all receive buffers! */
5454 mgsl_reset_rx_dma_buffers( info );
5455 frame_start_index = 0;
5456 start_of_frame_found = FALSE;
5457 reprogram_dma = TRUE;
5458 break;
5459 }
5460 }
5461
5462 if ( start_of_frame_found && !end_of_frame_found )
5463 {
5464 /* There is an unfinished string of receive DMA buffers */
5465 /* as a result of the receiver overrun. */
5466
5467 /* Reset the buffers for the unfinished frame */
5468 /* and reprogram the receive DMA controller to start */
5469 /* at the 1st buffer of unfinished frame. */
5470
5471 start_index = frame_start_index;
5472
5473 do
5474 {
5475 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5476
5477 /* Adjust index for wrap around. */
5478 if ( start_index == info->rx_buffer_count )
5479 start_index = 0;
5480
5481 } while( start_index != end_index );
5482
5483 reprogram_dma = TRUE;
5484 }
5485
5486 if ( reprogram_dma )
5487 {
5488 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5489 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5490 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5491
5492 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5493
5494 /* This empties the receive FIFO and loads the RCC with RCLR */
5495 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5496
5497 /* program 16C32 with physical address of 1st DMA buffer entry */
5498 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5499 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5500 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5501
5502 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5503 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5504 usc_EnableInterrupts( info, RECEIVE_STATUS );
5505
5506 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5507 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5508
5509 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5510 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5511 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5512 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5513 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5514 else
5515 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5516 }
5517 else
5518 {
5519 /* This empties the receive FIFO and loads the RCC with RCLR */
5520 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5521 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5522 }
5523
5524 } /* end of usc_process_rxoverrun_sync() */
5525
5526 /* usc_stop_receiver()
5527 *
5528 * Disable USC receiver
5529 *
5530 * Arguments: info pointer to device instance data
5531 * Return Value: None
5532 */
5533 static void usc_stop_receiver( struct mgsl_struct *info )
5534 {
5535 if (debug_level >= DEBUG_LEVEL_ISR)
5536 printk("%s(%d):usc_stop_receiver(%s)\n",
5537 __FILE__,__LINE__, info->device_name );
5538
5539 /* Disable receive DMA channel. */
5540 /* This also disables receive DMA channel interrupts */
5541 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5542
5543 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5544 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5545 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5546
5547 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5548
5549 /* This empties the receive FIFO and loads the RCC with RCLR */
5550 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5551 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5552
5553 info->rx_enabled = 0;
5554 info->rx_overflow = 0;
5555 info->rx_rcc_underrun = 0;
5556
5557 }	/* end of usc_stop_receiver() */
5558
5559 /* usc_start_receiver()
5560 *
5561 * Enable the USC receiver
5562 *
5563 * Arguments: info pointer to device instance data
5564 * Return Value: None
5565 */
5566 static void usc_start_receiver( struct mgsl_struct *info )
5567 {
5568 u32 phys_addr;
5569
5570 if (debug_level >= DEBUG_LEVEL_ISR)
5571 printk("%s(%d):usc_start_receiver(%s)\n",
5572 __FILE__,__LINE__, info->device_name );
5573
5574 mgsl_reset_rx_dma_buffers( info );
5575 usc_stop_receiver( info );
5576
5577 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5578 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5579
5580 if ( info->params.mode == MGSL_MODE_HDLC ||
5581 info->params.mode == MGSL_MODE_RAW ) {
5582 /* DMA mode Transfers */
5583 /* Program the DMA controller. */
5584 /* Enable the DMA controller end of buffer interrupt. */
5585
5586 /* program 16C32 with physical address of 1st DMA buffer entry */
5587 phys_addr = info->rx_buffer_list[0].phys_entry;
5588 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5589 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5590
5591 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5592 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5593 usc_EnableInterrupts( info, RECEIVE_STATUS );
5594
5595 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5596 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5597
5598 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5599 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5600 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5601 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5602 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5603 else
5604 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5605 } else {
5606 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5607 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5608 usc_EnableInterrupts(info, RECEIVE_DATA);
5609
5610 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5611 usc_RCmd( info, RCmd_EnterHuntmode );
5612
5613 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5614 }
5615
5616 usc_OutReg( info, CCSR, 0x1020 );
5617
5618 info->rx_enabled = 1;
5619
5620 } /* end of usc_start_receiver() */
5621
5622 /* usc_start_transmitter()
5623 *
5624 * Enable the USC transmitter and send a transmit frame if
5625 * one is loaded in the DMA buffers.
5626 *
5627 * Arguments: info pointer to device instance data
5628 * Return Value: None
5629 */
5630 static void usc_start_transmitter( struct mgsl_struct *info )
5631 {
5632 u32 phys_addr;
5633 unsigned int FrameSize;
5634
5635 if (debug_level >= DEBUG_LEVEL_ISR)
5636 printk("%s(%d):usc_start_transmitter(%s)\n",
5637 __FILE__,__LINE__, info->device_name );
5638
5639 if ( info->xmit_cnt ) {
5640
5641 /* If auto RTS enabled and RTS is inactive, then assert */
5642 /* RTS and set a flag indicating that the driver should */
5643 /* negate RTS when the transmission completes. */
5644
5645 info->drop_rts_on_tx_done = 0;
5646
5647 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5648 usc_get_serial_signals( info );
5649 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5650 info->serial_signals |= SerialSignal_RTS;
5651 usc_set_serial_signals( info );
5652 info->drop_rts_on_tx_done = 1;
5653 }
5654 }
5655
5656
5657 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5658 if ( !info->tx_active ) {
5659 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5660 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5661 usc_EnableInterrupts(info, TRANSMIT_DATA);
5662 usc_load_txfifo(info);
5663 }
5664 } else {
5665 /* Disable transmit DMA controller while programming. */
5666 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5667
5668 /* Transmit DMA buffer is loaded, so program USC */
5669 /* to send the frame contained in the buffers. */
5670
5671 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5672
5673 /* if operating in Raw sync mode, reset the rcc component
5674 * of the tx dma buffer entry, otherwise, the serial controller
5675 * will send a closing sync char after this count.
5676 */
5677 if ( info->params.mode == MGSL_MODE_RAW )
5678 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5679
5680 /* Program the Transmit Character Length Register (TCLR) */
5681 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5682 usc_OutReg( info, TCLR, (u16)FrameSize );
5683
5684 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5685
5686 /* Program the address of the 1st DMA Buffer Entry in linked list */
5687 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5688 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5689 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5690
5691 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5692 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5693 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5694
5695 if ( info->params.mode == MGSL_MODE_RAW &&
5696 info->num_tx_dma_buffers > 1 ) {
5697 /* When running external sync mode, attempt to 'stream' transmit */
5698 /* by filling tx dma buffers as they become available. To do this */
5699 /* we need to enable Tx DMA EOB Status interrupts : */
5700 /* */
5701 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5702 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5703
5704 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5705 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5706 }
5707
5708 /* Initialize Transmit DMA Channel */
5709 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5710
5711 usc_TCmd( info, TCmd_SendFrame );
5712
5713 info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
5714 add_timer(&info->tx_timer);
5715 }
5716 info->tx_active = 1;
5717 }
5718
5719 if ( !info->tx_enabled ) {
5720 info->tx_enabled = 1;
5721 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5722 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5723 else
5724 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5725 }
5726
5727 } /* end of usc_start_transmitter() */
5728
5729 /* usc_stop_transmitter()
5730 *
5731 * Stops the transmitter and DMA
5732 *
5733  * Arguments:		info	pointer to device instance data
5734 * Return Value: None
5735 */
5736 static void usc_stop_transmitter( struct mgsl_struct *info )
5737 {
5738 if (debug_level >= DEBUG_LEVEL_ISR)
5739 printk("%s(%d):usc_stop_transmitter(%s)\n",
5740 __FILE__,__LINE__, info->device_name );
5741
5742 del_timer(&info->tx_timer);
5743
5744 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5745 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5746 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5747
5748 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5749 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5750 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5751
5752 info->tx_enabled = 0;
5753 info->tx_active = 0;
5754
5755 } /* end of usc_stop_transmitter() */
5756
5757 /* usc_load_txfifo()
5758 *
5759 * Fill the transmit FIFO until the FIFO is full or
5760 * there is no more data to load.
5761 *
5762 * Arguments: info pointer to device extension (instance data)
5763 * Return Value: None
5764 */
5765 static void usc_load_txfifo( struct mgsl_struct *info )
5766 {
5767 int Fifocount;
5768 u8 TwoBytes[2];
5769
5770 if ( !info->xmit_cnt && !info->x_char )
5771 return;
5772
5773 /* Select transmit FIFO status readback in TICR */
5774 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5775
5776 /* load the Transmit FIFO until FIFOs full or all data sent */
5777
5778 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5779 /* there is more space in the transmit FIFO and */
5780 /* there is more data in transmit buffer */
5781
5782 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5783 /* write a 16-bit word from transmit buffer to 16C32 */
5784
5785 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5786 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5787 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5788 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5789
5790 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5791
5792 info->xmit_cnt -= 2;
5793 info->icount.tx += 2;
5794 } else {
5795 /* only 1 byte left to transmit or 1 FIFO slot left */
5796
5797 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5798 info->io_base + CCAR );
5799
5800 if (info->x_char) {
5801 /* transmit pending high priority char */
5802 outw( info->x_char,info->io_base + CCAR );
5803 info->x_char = 0;
5804 } else {
5805 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5806 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5807 info->xmit_cnt--;
5808 }
5809 info->icount.tx++;
5810 }
5811 }
5812
5813 } /* end of usc_load_txfifo() */
5814
5815 /* usc_reset()
5816 *
5817 * Reset the adapter to a known state and prepare it for further use.
5818 *
5819 * Arguments: info pointer to device instance data
5820 * Return Value: None
5821 */
5822 static void usc_reset( struct mgsl_struct *info )
5823 {
5824 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5825 int i;
5826 u32 readval;
5827
5828 /* Set BIT30 of Misc Control Register */
5829 /* (Local Control Register 0x50) to force reset of USC. */
5830
5831 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5832 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5833
5834 info->misc_ctrl_value |= BIT30;
5835 *MiscCtrl = info->misc_ctrl_value;
5836
5837 /*
5838 * Force at least 170ns delay before clearing
5839 * reset bit. Each read from LCR takes at least
5840 		 * 30ns, so reading it 10 times guarantees at least 300ns.
5841 */
5842 for(i=0;i<10;i++)
5843 readval = *MiscCtrl;
5844
5845 info->misc_ctrl_value &= ~BIT30;
5846 *MiscCtrl = info->misc_ctrl_value;
5847
5848 *LCR0BRDR = BUS_DESCRIPTOR(
5849 1, // Write Strobe Hold (0-3)
5850 2, // Write Strobe Delay (0-3)
5851 2, // Read Strobe Delay (0-3)
5852 0, // NWDD (Write data-data) (0-3)
5853 4, // NWAD (Write Addr-data) (0-31)
5854 0, // NXDA (Read/Write Data-Addr) (0-3)
5855 0, // NRDD (Read Data-Data) (0-3)
5856 5 // NRAD (Read Addr-Data) (0-31)
5857 );
5858 } else {
5859 /* do HW reset */
5860 outb( 0,info->io_base + 8 );
5861 }
5862
5863 info->mbre_bit = 0;
5864 info->loopback_bits = 0;
5865 info->usc_idle_mode = 0;
5866
5867 /*
5868 * Program the Bus Configuration Register (BCR)
5869 *
5870 * <15> 0 Don't use separate address
5871 * <14..6> 0 reserved
5872 * <5..4> 00 IAckmode = Default, don't care
5873 * <3> 1 Bus Request Totem Pole output
5874 * <2> 1 Use 16 Bit data bus
5875 * <1> 0 IRQ Totem Pole output
5876 * <0> 0 Don't Shift Right Addr
5877 *
5878 * 0000 0000 0000 1100 = 0x000c
5879 *
5880 * By writing to io_base + SDPIN the Wait/Ack pin is
5881 * programmed to work as a Wait pin.
5882 */
5883
5884 outw( 0x000c,info->io_base + SDPIN );
5885
5886
5887 outw( 0,info->io_base );
5888 outw( 0,info->io_base + CCAR );
5889
5890 /* select little endian byte ordering */
5891 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5892
5893
5894 /* Port Control Register (PCR)
5895 *
5896 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5897 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5898 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5899 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5900 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5901 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5902 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5903 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5904 *
5905 * 1111 0000 1111 0101 = 0xf0f5
5906 */
5907
5908 usc_OutReg( info, PCR, 0xf0f5 );
5909
5910
5911 /*
5912 * Input/Output Control Register
5913 *
5914 * <15..14> 00 CTS is active low input
5915 * <13..12> 00 DCD is active low input
5916 * <11..10> 00 TxREQ pin is input (DSR)
5917 * <9..8> 00 RxREQ pin is input (RI)
5918 * <7..6> 00 TxD is output (Transmit Data)
5919 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5920 * <2..0> 100 RxC is Output (drive with BRG0)
5921 *
5922 * 0000 0000 0000 0100 = 0x0004
5923 */
5924
5925 usc_OutReg( info, IOCR, 0x0004 );
5926
5927 } /* end of usc_reset() */
5928
5929 /* usc_set_async_mode()
5930 *
5931 * Program adapter for asynchronous communications.
5932 *
5933 * Arguments: info pointer to device instance data
5934 * Return Value: None
5935 */
5936 static void usc_set_async_mode( struct mgsl_struct *info )
5937 {
5938 u16 RegValue;
5939
5940 /* disable interrupts while programming USC */
5941 usc_DisableMasterIrqBit( info );
5942
5943 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5944 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5945
5946 usc_loopback_frame( info );
5947
5948 /* Channel mode Register (CMR)
5949 *
5950 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5951 * <13..12> 00 00 = 16X Clock
5952 * <11..8> 0000 Transmitter mode = Asynchronous
5953 * <7..6> 00 reserved?
5954 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5955 * <3..0> 0000 Receiver mode = Asynchronous
5956 *
5957 * 0000 0000 0000 0000 = 0x0
5958 */
5959
5960 RegValue = 0;
5961 if ( info->params.stop_bits != 1 )
5962 RegValue |= BIT14;
5963 usc_OutReg( info, CMR, RegValue );
5964
5965
5966 /* Receiver mode Register (RMR)
5967 *
5968 * <15..13> 000 encoding = None
5969 * <12..08> 00000 reserved (Sync Only)
5970 * <7..6> 00 Even parity
5971 * <5> 0 parity disabled
5972 * <4..2> 000 Receive Char Length = 8 bits
5973 * <1..0> 00 Disable Receiver
5974 *
5975 * 0000 0000 0000 0000 = 0x0
5976 */
5977
5978 RegValue = 0;
5979
5980 if ( info->params.data_bits != 8 )
5981 RegValue |= BIT4+BIT3+BIT2;
5982
5983 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5984 RegValue |= BIT5;
5985 if ( info->params.parity != ASYNC_PARITY_ODD )
5986 RegValue |= BIT6;
5987 }
5988
5989 usc_OutReg( info, RMR, RegValue );
5990
5991
5992 /* Set IRQ trigger level */
5993
5994 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5995
5996
5997 /* Receive Interrupt Control Register (RICR)
5998 *
5999 * <15..8> ? RxFIFO IRQ Request Level
6000 *
6001 * Note: For async mode the receive FIFO level must be set
6002 * to 0 to avoid the situation where the FIFO contains fewer bytes
6003 * than the trigger level and no more data is expected.
6004 *
6005 * <7> 0 Exited Hunt IA (Interrupt Arm)
6006 * <6> 0 Idle Received IA
6007 * <5> 0 Break/Abort IA
6008 * <4> 0 Rx Bound IA
6009 * <3> 0 Queued status reflects oldest byte in FIFO
6010 * <2> 0 Abort/PE IA
6011 * <1> 0 Rx Overrun IA
6012 * <0> 0 Select TC0 value for readback
6013 *
6014 	 *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6015 */
6016
6017 usc_OutReg( info, RICR, 0x0000 );
6018
6019 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6020 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6021
6022
6023 /* Transmit mode Register (TMR)
6024 *
6025 * <15..13> 000 encoding = None
6026 * <12..08> 00000 reserved (Sync Only)
6027 * <7..6> 00 Transmit parity Even
6028 * <5> 0 Transmit parity Disabled
6029 * <4..2> 000 Tx Char Length = 8 bits
6030 * <1..0> 00 Disable Transmitter
6031 *
6032 * 0000 0000 0000 0000 = 0x0
6033 */
6034
6035 RegValue = 0;
6036
6037 if ( info->params.data_bits != 8 )
6038 RegValue |= BIT4+BIT3+BIT2;
6039
6040 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6041 RegValue |= BIT5;
6042 if ( info->params.parity != ASYNC_PARITY_ODD )
6043 RegValue |= BIT6;
6044 }
6045
6046 usc_OutReg( info, TMR, RegValue );
6047
6048 usc_set_txidle( info );
6049
6050
6051 /* Set IRQ trigger level */
6052
6053 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6054
6055
6056 /* Transmit Interrupt Control Register (TICR)
6057 *
6058 * <15..8> ? Transmit FIFO IRQ Level
6059 * <7> 0 Present IA (Interrupt Arm)
6060 * <6> 1 Idle Sent IA
6061 * <5> 0 Abort Sent IA
6062 * <4> 0 EOF/EOM Sent IA
6063 * <3> 0 CRC Sent IA
6064 * <2> 0 1 = Wait for SW Trigger to Start Frame
6065 * <1> 0 Tx Underrun IA
6066 * <0> 0 TC0 constant on read back
6067 *
6068 	 *	0000 0000 0100 0000 = 0x0040 + (FIFOLEVEL in MSB)
6069 */
6070
6071 usc_OutReg( info, TICR, 0x1f40 );
6072
6073 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6074 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6075
6076 usc_enable_async_clock( info, info->params.data_rate );
6077
6078
6079 /* Channel Control/status Register (CCSR)
6080 *
6081 * <15> X RCC FIFO Overflow status (RO)
6082 * <14> X RCC FIFO Not Empty status (RO)
6083 * <13> 0 1 = Clear RCC FIFO (WO)
6084 * <12> X DPLL in Sync status (RO)
6085 * <11> X DPLL 2 Missed Clocks status (RO)
6086 * <10> X DPLL 1 Missed Clock status (RO)
6087 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6088 * <7> X SDLC Loop On status (RO)
6089 * <6> X SDLC Loop Send status (RO)
6090 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6091 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6092 * <1..0> 00 reserved
6093 *
6094 * 0000 0000 0010 0000 = 0x0020
6095 */
6096
6097 usc_OutReg( info, CCSR, 0x0020 );
6098
6099 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6100 RECEIVE_DATA + RECEIVE_STATUS );
6101
6102 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6103 RECEIVE_DATA + RECEIVE_STATUS );
6104
6105 usc_EnableMasterIrqBit( info );
6106
6107 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6108 /* Enable INTEN (Port 6, Bit12) */
6109 /* This connects the IRQ request signal to the ISA bus */
6110 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6111 }
6112
6113 if (info->params.loopback) {
6114 info->loopback_bits = 0x300;
6115 outw(0x0300, info->io_base + CCAR);
6116 }
6117
6118 } /* end of usc_set_async_mode() */
6119
6120 /* usc_loopback_frame()
6121 *
6122 * Loop back a small (2 byte) dummy SDLC frame.
6123 * Interrupts and DMA are NOT used. The purpose of this is to
6124 * clear any 'stale' status info left over from running in async mode.
6125 *
6126 * The 16C32 shows the strange behaviour of marking the 1st
6127 * received SDLC frame with a CRC error even when there is no
6128  * CRC error. To get around this a small dummy frame of 2 bytes
6129 * is looped back when switching from async to sync mode.
6130 *
6131 * Arguments: info pointer to device instance data
6132 * Return Value: None
6133 */
6134 static void usc_loopback_frame( struct mgsl_struct *info )
6135 {
6136 int i;
6137 unsigned long oldmode = info->params.mode;
6138
6139 info->params.mode = MGSL_MODE_HDLC;
6140
6141 usc_DisableMasterIrqBit( info );
6142
6143 usc_set_sdlc_mode( info );
6144 usc_enable_loopback( info, 1 );
6145
6146 /* Write 16-bit Time Constant for BRG0 */
6147 usc_OutReg( info, TC0R, 0 );
6148
6149 /* Channel Control Register (CCR)
6150 *
6151 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6152 * <13> 0 Trigger Tx on SW Command Disabled
6153 * <12> 0 Flag Preamble Disabled
6154 * <11..10> 00 Preamble Length = 8-Bits
6155 * <9..8> 01 Preamble Pattern = flags
6156 	 * <7..6>	00	Don't use 32-bit Rx status Blocks (RSBs)
6157 * <5> 0 Trigger Rx on SW Command Disabled
6158 * <4..0> 0 reserved
6159 *
6160 * 0000 0001 0000 0000 = 0x0100
6161 */
6162
6163 usc_OutReg( info, CCR, 0x0100 );
6164
6165 /* SETUP RECEIVER */
6166 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6167 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6168
6169 /* SETUP TRANSMITTER */
6170 /* Program the Transmit Character Length Register (TCLR) */
6171 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6172 usc_OutReg( info, TCLR, 2 );
6173 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6174
6175 /* unlatch Tx status bits, and start transmit channel. */
6176 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6177 outw(0,info->io_base + DATAREG);
6178
6179 /* ENABLE TRANSMITTER */
6180 usc_TCmd( info, TCmd_SendFrame );
6181 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6182
6183 /* WAIT FOR RECEIVE COMPLETE */
6184 for (i=0 ; i<1000 ; i++)
6185 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6186 break;
6187
6188 /* clear Internal Data loopback mode */
6189 usc_enable_loopback(info, 0);
6190
6191 usc_EnableMasterIrqBit(info);
6192
6193 info->params.mode = oldmode;
6194
6195 } /* end of usc_loopback_frame() */
6196
6197 /* usc_set_sync_mode() Programs the USC for SDLC communications.
6198 *
6199 * Arguments: info pointer to adapter info structure
6200 * Return Value: None
6201 */
6202 static void usc_set_sync_mode( struct mgsl_struct *info )
6203 {
6204 usc_loopback_frame( info );
6205 usc_set_sdlc_mode( info );
6206
6207 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6208 /* Enable INTEN (Port 6, Bit12) */
6209 /* This connects the IRQ request signal to the ISA bus */
6210 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6211 }
6212
6213 usc_enable_aux_clock(info, info->params.clock_speed);
6214
6215 if (info->params.loopback)
6216 usc_enable_loopback(info,1);
6217
6218 }	/* end of usc_set_sync_mode() */
6219
6220 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6221 *
6222 * Arguments: info pointer to device instance data
6223 * Return Value: None
6224 */
6225 static void usc_set_txidle( struct mgsl_struct *info )
6226 {
6227 u16 usc_idle_mode = IDLEMODE_FLAGS;
6228
6229 /* Map API idle mode to USC register bits */
6230
6231 switch( info->idle_mode ){
6232 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6233 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6234 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6235 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6236 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6237 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6238 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6239 }
6240
6241 info->usc_idle_mode = usc_idle_mode;
6242 //usc_OutReg(info, TCSR, usc_idle_mode);
6243 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6244 info->tcsr_value += usc_idle_mode;
6245 usc_OutReg(info, TCSR, info->tcsr_value);
6246
6247 /*
6248 * if SyncLink WAN adapter is running in external sync mode, the
6249 * transmitter has been set to Monosync in order to try to mimic
6250 * a true raw outbound bit stream. Monosync still sends an open/close
6251 * sync char at the start/end of a frame. Try to match those sync
6252 * patterns to the idle mode set here
6253 */
6254 if ( info->params.mode == MGSL_MODE_RAW ) {
6255 unsigned char syncpat = 0;
6256 switch( info->idle_mode ) {
6257 case HDLC_TXIDLE_FLAGS:
6258 syncpat = 0x7e;
6259 break;
6260 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6261 syncpat = 0x55;
6262 break;
6263 case HDLC_TXIDLE_ZEROS:
6264 case HDLC_TXIDLE_SPACE:
6265 syncpat = 0x00;
6266 break;
6267 case HDLC_TXIDLE_ONES:
6268 case HDLC_TXIDLE_MARK:
6269 syncpat = 0xff;
6270 break;
6271 case HDLC_TXIDLE_ALT_MARK_SPACE:
6272 syncpat = 0xaa;
6273 break;
6274 }
6275
6276 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6277 }
6278
6279 } /* end of usc_set_txidle() */
6280
6281 /* usc_get_serial_signals()
6282 *
6283 * Query the adapter for the state of the V24 status (input) signals.
6284 *
6285 * Arguments: info pointer to device instance data
6286 * Return Value: None
6287 */
6288 static void usc_get_serial_signals( struct mgsl_struct *info )
6289 {
6290 u16 status;
6291
6292 /* clear all serial signals except DTR and RTS */
6293 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6294
6295 /* Read the Misc Interrupt status Register (MISR) to get */
6296 /* the V24 status signals. */
6297
6298 status = usc_InReg( info, MISR );
6299
6300 /* set serial signal bits to reflect MISR */
6301
6302 if ( status & MISCSTATUS_CTS )
6303 info->serial_signals |= SerialSignal_CTS;
6304
6305 if ( status & MISCSTATUS_DCD )
6306 info->serial_signals |= SerialSignal_DCD;
6307
6308 if ( status & MISCSTATUS_RI )
6309 info->serial_signals |= SerialSignal_RI;
6310
6311 if ( status & MISCSTATUS_DSR )
6312 info->serial_signals |= SerialSignal_DSR;
6313
6314 } /* end of usc_get_serial_signals() */
6315
6316 /* usc_set_serial_signals()
6317 *
6318 * Set the state of DTR and RTS based on contents of
6319 * serial_signals member of device extension.
6320 *
6321 * Arguments: info pointer to device instance data
6322 * Return Value: None
6323 */
6324 static void usc_set_serial_signals( struct mgsl_struct *info )
6325 {
6326 u16 Control;
6327 unsigned char V24Out = info->serial_signals;
6328
6329 /* get the current value of the Port Control Register (PCR) */
6330
6331 Control = usc_InReg( info, PCR );
6332
6333 if ( V24Out & SerialSignal_RTS )
6334 Control &= ~(BIT6);
6335 else
6336 Control |= BIT6;
6337
6338 if ( V24Out & SerialSignal_DTR )
6339 Control &= ~(BIT4);
6340 else
6341 Control |= BIT4;
6342
6343 usc_OutReg( info, PCR, Control );
6344
6345 } /* end of usc_set_serial_signals() */
6346
6347 /* usc_enable_async_clock()
6348 *
6349 * Enable the async clock at the specified frequency.
6350 *
6351 * Arguments: info pointer to device instance data
6352 * data_rate data rate of clock in bps
6353 * 0 disables the AUX clock.
6354 * Return Value: None
6355 */
6356 static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6357 {
6358 if ( data_rate ) {
6359 /*
6360 * Clock mode Control Register (CMCR)
6361 *
6362 * <15..14> 00 counter 1 Disabled
6363 * <13..12> 00 counter 0 Disabled
6364 * <11..10> 11 BRG1 Input is TxC Pin
6365 * <9..8> 11 BRG0 Input is TxC Pin
6366 * <7..6> 01 DPLL Input is BRG1 Output
6367 * <5..3> 100 TxCLK comes from BRG0
6368 * <2..0> 100 RxCLK comes from BRG0
6369 *
6370 * 0000 1111 0110 0100 = 0x0f64
6371 */
6372
6373 usc_OutReg( info, CMCR, 0x0f64 );
6374
6375
6376 /*
6377 * Write 16-bit Time Constant for BRG0
6378 * Time Constant = (ClkSpeed / data_rate) - 1
6379 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6380 */
6381
6382 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6383 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6384 else
6385 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
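/*
 * Worked example (editorial illustration): at 9600 bps the time constant
 * is 691200/9600 - 1 = 71 on PCI and 921600/9600 - 1 = 95 on ISA.
 */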
6386
6387
6388 /*
6389 * Hardware Configuration Register (HCR)
6390 * Clear Bit 1, BRG0 mode = Continuous
6391 * Set Bit 0 to enable BRG0.
6392 */
6393
6394 usc_OutReg( info, HCR,
6395 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6396
6397
6398 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6399
6400 usc_OutReg( info, IOCR,
6401 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6402 } else {
6403 /* data rate == 0 so turn off BRG0 */
6404 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6405 }
6406
6407 } /* end of usc_enable_async_clock() */
6408
6409 /*
6410 * Buffer Structures:
6411 *
6412 * Normal memory access uses virtual addresses that can make discontiguous
6413 * physical memory pages appear to be contiguous in the virtual address
6414  * space (the processor's memory mapping handles the conversions).
6415 *
6416 * DMA transfers require physically contiguous memory. This is because
6417 * the DMA system controller and DMA bus masters deal with memory using
6418 * only physical addresses.
6419 *
6420 * This causes a problem under Windows NT when large DMA buffers are
6421 * needed. Fragmentation of the nonpaged pool prevents allocations of
6422 * physically contiguous buffers larger than the PAGE_SIZE.
6423 *
6424 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6425 * allows DMA transfers to physically discontiguous buffers. Information
6426 * about each data transfer buffer is contained in a memory structure
6427 * called a 'buffer entry'. A list of buffer entries is maintained
6428 * to track and control the use of the data transfer buffers.
6429 *
6430 * To support this strategy we will allocate sufficient PAGE_SIZE
6431 * contiguous memory buffers to allow for the total required buffer
6432 * space.
6433 *
6434 * The 16C32 accesses the list of buffer entries using Bus Master
6435 * DMA. Control information is read from the buffer entries by the
6436 * 16C32 to control data transfers. status information is written to
6437 * the buffer entries by the 16C32 to indicate the status of completed
6438 * transfers.
6439 *
6440 * The CPU writes control information to the buffer entries to control
6441 * the 16C32 and reads status information from the buffer entries to
6442 * determine information about received and transmitted frames.
6443 *
6444 * Because the CPU and 16C32 (adapter) both need simultaneous access
6445 * to the buffer entries, the buffer entry memory is allocated with
6446 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6447 * entry list to PAGE_SIZE.
6448 *
6449 * The actual data buffers on the other hand will only be accessed
6450 * by the CPU or the adapter but not by both simultaneously. This allows
6451 * Scatter/Gather packet based DMA procedures for using physically
6452 * discontiguous pages.
6453 */
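/*
 * Editorial sketch (illustrative only; the real type is DMABUFFERENTRY,
 * defined elsewhere in this file). The field names below are simply the
 * ones the code in this file reads and writes:
 *
 *	struct buffer_entry_sketch {
 *		u16  count;      // bytes in buffer; cleared by 16C32 while in use
 *		u16  status;     // set by 16C32 on the last buffer of a frame
 *		u16  rcc;        // residual character count -> frame size
 *		char *virt_addr; // CPU (virtual) view of the data buffer
 *		u32  phys_entry; // bus address of a list entry (NRARL/NRARU)
 *	};
 */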
6454
6455 /*
6456 * mgsl_reset_tx_dma_buffers()
6457 *
6458 * Set the count for all transmit buffers to 0 to indicate the
6459 * buffer is available for use and set the current buffer to the
6460 * first buffer. This effectively makes all buffers free and
6461 * discards any data in buffers.
6462 *
6463 * Arguments: info pointer to device instance data
6464 * Return Value: None
6465 */
6466 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6467 {
6468 unsigned int i;
6469
6470 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6471 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6472 }
6473
6474 info->current_tx_buffer = 0;
6475 info->start_tx_dma_buffer = 0;
6476 info->tx_dma_buffers_used = 0;
6477
6478 info->get_tx_holding_index = 0;
6479 info->put_tx_holding_index = 0;
6480 info->tx_holding_count = 0;
6481
6482 } /* end of mgsl_reset_tx_dma_buffers() */
6483
6484 /*
6485 * num_free_tx_dma_buffers()
6486 *
6487 * returns the number of free tx dma buffers available
6488 *
6489 * Arguments: info pointer to device instance data
6490 * Return Value: number of free tx dma buffers
6491 */
6492 static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6493 {
6494 return info->tx_buffer_count - info->tx_dma_buffers_used;
6495 }
6496
6497 /*
6498 * mgsl_reset_rx_dma_buffers()
6499 *
6500 * Set the count for all receive buffers to DMABUFFERSIZE
6501 * and set the current buffer to the first buffer. This effectively
6502 * makes all buffers free and discards any data in buffers.
6503 *
6504 * Arguments: info pointer to device instance data
6505 * Return Value: None
6506 */
6507 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6508 {
6509 unsigned int i;
6510
6511 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6512 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6513 // info->rx_buffer_list[i].count = DMABUFFERSIZE;
6514 // info->rx_buffer_list[i].status = 0;
6515 }
6516
6517 info->current_rx_buffer = 0;
6518
6519 } /* end of mgsl_reset_rx_dma_buffers() */
6520
6521 /*
6522 * mgsl_free_rx_frame_buffers()
6523 *
6524 * Free the receive buffers used by a received SDLC
6525 * frame such that the buffers can be reused.
6526 *
6527 * Arguments:
6528 *
6529 * info pointer to device instance data
6530 * StartIndex index of 1st receive buffer of frame
6531 * EndIndex index of last receive buffer of frame
6532 *
6533 * Return Value: None
6534 */
6535 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6536 {
6537 int Done = 0;
6538 DMABUFFERENTRY *pBufEntry;
6539 unsigned int Index;
6540
6541 /* Starting with 1st buffer entry of the frame clear the status */
6542 /* field and set the count field to DMA Buffer Size. */
6543
6544 Index = StartIndex;
6545
6546 while( !Done ) {
6547 pBufEntry = &(info->rx_buffer_list[Index]);
6548
6549 if ( Index == EndIndex ) {
6550 /* This is the last buffer of the frame! */
6551 Done = 1;
6552 }
6553
6554 /* reset current buffer for reuse */
6555 // pBufEntry->status = 0;
6556 // pBufEntry->count = DMABUFFERSIZE;
6557 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6558
6559 /* advance to next buffer entry in linked list */
6560 Index++;
6561 if ( Index == info->rx_buffer_count )
6562 Index = 0;
6563 }
6564
6565 /* set current buffer to next buffer after last buffer of frame */
6566 info->current_rx_buffer = Index;
6567
6568 }	/* end of mgsl_free_rx_frame_buffers() */
6569
6570 /* mgsl_get_rx_frame()
6571 *
6572 * This function attempts to return a received SDLC frame from the
6573 * receive DMA buffers. Only frames received without errors are returned.
6574 *
6575 * Arguments: info pointer to device extension
6576 * Return Value: 1 if frame returned, otherwise 0
6577 */
6578 static int mgsl_get_rx_frame(struct mgsl_struct *info)
6579 {
6580 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6581 unsigned short status;
6582 DMABUFFERENTRY *pBufEntry;
6583 unsigned int framesize = 0;
6584 int ReturnCode = 0;
6585 unsigned long flags;
6586 struct tty_struct *tty = info->tty;
6587 int return_frame = 0;
6588
6589 /*
6590 * current_rx_buffer points to the 1st buffer of the next available
6591 * receive frame. To find the last buffer of the frame look for
6592 * a non-zero status field in the buffer entries. (The status
6593 	 * field is set by the 16C32 after completing a receive frame.)
6594 */
6595
6596 StartIndex = EndIndex = info->current_rx_buffer;
6597
6598 while( !info->rx_buffer_list[EndIndex].status ) {
6599 /*
6600 * If the count field of the buffer entry is non-zero then
6601 * this buffer has not been used. (The 16C32 clears the count
6602 * field when it starts using the buffer.) If an unused buffer
6603 * is encountered then there are no frames available.
6604 */
6605
6606 if ( info->rx_buffer_list[EndIndex].count )
6607 goto Cleanup;
6608
6609 /* advance to next buffer entry in linked list */
6610 EndIndex++;
6611 if ( EndIndex == info->rx_buffer_count )
6612 EndIndex = 0;
6613
6614 /* if entire list searched then no frame available */
6615 if ( EndIndex == StartIndex ) {
6616 /* If this occurs then something bad happened,
6617 * all buffers have been 'used' but none mark
6618 * the end of a frame. Reset buffers and receiver.
6619 */
6620
6621 if ( info->rx_enabled ){
6622 spin_lock_irqsave(&info->irq_spinlock,flags);
6623 usc_start_receiver(info);
6624 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6625 }
6626 goto Cleanup;
6627 }
6628 }
6629
6630
6631 /* check status of receive frame */
6632
6633 status = info->rx_buffer_list[EndIndex].status;
6634
6635 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6636 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6637 if ( status & RXSTATUS_SHORT_FRAME )
6638 info->icount.rxshort++;
6639 else if ( status & RXSTATUS_ABORT )
6640 info->icount.rxabort++;
6641 else if ( status & RXSTATUS_OVERRUN )
6642 info->icount.rxover++;
6643 else {
6644 info->icount.rxcrc++;
6645 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6646 return_frame = 1;
6647 }
6648 framesize = 0;
6649 #ifdef CONFIG_HDLC
6650 {
6651 struct net_device_stats *stats = hdlc_stats(info->netdev);
6652 stats->rx_errors++;
6653 stats->rx_frame_errors++;
6654 }
6655 #endif
6656 } else
6657 return_frame = 1;
6658
6659 if ( return_frame ) {
6660 /* receive frame has no errors, get frame size.
6661 * The frame size is the starting value of the RCC (which was
6662 * set to 0xffff) minus the ending value of the RCC (decremented
6663 * once for each receive character) minus 2 for the 16-bit CRC.
6664 */
6665
6666 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6667
6668 /* adjust frame size for CRC if any */
6669 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6670 framesize -= 2;
6671 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6672 framesize -= 4;
6673 }
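	/*
	 * Worked example (editorial addition, assuming RCLRVALUE is 0xffff
	 * as the comment above states): an ending rcc of 0xffe9 gives
	 * 0xffff - 0xffe9 = 22 received characters, or 22 - 2 = 20 data
	 * bytes with HDLC_CRC_16_CCITT.
	 */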
6674
6675 if ( debug_level >= DEBUG_LEVEL_BH )
6676 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6677 __FILE__,__LINE__,info->device_name,status,framesize);
6678
6679 if ( debug_level >= DEBUG_LEVEL_DATA )
6680 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6681 min_t(int, framesize, DMABUFFERSIZE),0);
6682
6683 if (framesize) {
6684 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6685 ((framesize+1) > info->max_frame_size) ) ||
6686 (framesize > info->max_frame_size) )
6687 info->icount.rxlong++;
6688 else {
6689 /* copy dma buffer(s) to contiguous intermediate buffer */
6690 int copy_count = framesize;
6691 int index = StartIndex;
6692 unsigned char *ptmp = info->intermediate_rxbuffer;
6693
6694 if ( !(status & RXSTATUS_CRC_ERROR))
6695 info->icount.rxok++;
6696
6697 while(copy_count) {
6698 int partial_count;
6699 if ( copy_count > DMABUFFERSIZE )
6700 partial_count = DMABUFFERSIZE;
6701 else
6702 partial_count = copy_count;
6703
6704 pBufEntry = &(info->rx_buffer_list[index]);
6705 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6706 ptmp += partial_count;
6707 copy_count -= partial_count;
6708
6709 if ( ++index == info->rx_buffer_count )
6710 index = 0;
6711 }
6712
6713 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6714 ++framesize;
6715 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6716 RX_CRC_ERROR :
6717 RX_OK);
6718
6719 if ( debug_level >= DEBUG_LEVEL_DATA )
6720 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6721 __FILE__,__LINE__,info->device_name,
6722 *ptmp);
6723 }
6724
6725 #ifdef CONFIG_HDLC
6726 if (info->netcount)
6727 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6728 else
6729 #endif
6730 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6731 }
6732 }
6733 /* Free the buffers used by this frame. */
6734 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6735
6736 ReturnCode = 1;
6737
6738 Cleanup:
6739
6740 if ( info->rx_enabled && info->rx_overflow ) {
6741 	/* The receiver needs to be restarted because of
6742 	 * a receive overflow (buffer or FIFO). If the
6743 	 * receive buffers are now empty, then restart the receiver.
6744 */
6745
6746 if ( !info->rx_buffer_list[EndIndex].status &&
6747 info->rx_buffer_list[EndIndex].count ) {
6748 spin_lock_irqsave(&info->irq_spinlock,flags);
6749 usc_start_receiver(info);
6750 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6751 }
6752 }
6753
6754 return ReturnCode;
6755
6756 } /* end of mgsl_get_rx_frame() */
6757
6758 /* mgsl_get_raw_rx_frame()
6759 *
6760 * This function attempts to return a received frame from the
6761 * receive DMA buffers when running in external loop mode. In this mode,
6762 * we will return at most one DMABUFFERSIZE frame to the application.
6763  * The USC receiver triggers on DCD going active to start a new
6764 * frame, and DCD going inactive to terminate the frame (similar to
6765 * processing a closing flag character).
6766 *
6767 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6768 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6769 * status field and the RCC field will indicate the length of the
6770 * entire received frame. We take this RCC field and get the modulus
6771  * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6772 * last Rx DMA buffer and return that last portion of the frame.
6773 *
6774 * Arguments: info pointer to device extension
6775 * Return Value: 1 if frame returned, otherwise 0
6776 */
6777 static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6778 {
6779 unsigned int CurrentIndex, NextIndex;
6780 unsigned short status;
6781 DMABUFFERENTRY *pBufEntry;
6782 unsigned int framesize = 0;
6783 int ReturnCode = 0;
6784 unsigned long flags;
6785 struct tty_struct *tty = info->tty;
6786
6787 /*
6788 * current_rx_buffer points to the 1st buffer of the next available
6789 * receive frame. The status field is set by the 16C32 after
6790 * completing a receive frame. If the status field of this buffer
6791 * is zero, either the USC is still filling this buffer or this
6792 * is one of a series of buffers making up a received frame.
6793 *
6794 * If the count field of this buffer is zero, the USC is either
6795 * using this buffer or has used this buffer. Look at the count
6796 * field of the next buffer. If that next buffer's count is
6797 * non-zero, the USC is still actively using the current buffer.
6798 * Otherwise, if the next buffer's count field is zero, the
6799 * current buffer is complete and the USC is using the next
6800 * buffer.
6801 */
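	/*
	 * Compact summary of the test below (editorial addition):
	 *
	 *	current.status  current.count  next.count   action
	 *	   non-zero          x             x        last buffer of frame, return it
	 *	      0              0             0        current buffer complete, return it
	 *	      0              0          non-zero    USC still filling current, wait
	 *	      0           non-zero         x        buffer not used yet, wait
	 */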
6802 CurrentIndex = NextIndex = info->current_rx_buffer;
6803 ++NextIndex;
6804 if ( NextIndex == info->rx_buffer_count )
6805 NextIndex = 0;
6806
6807 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6808 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6809 info->rx_buffer_list[NextIndex].count == 0)) {
6810 /*
6811 * Either the status field of this dma buffer is non-zero
6812 * (indicating the last buffer of a receive frame) or the next
6813 * buffer is marked as in use -- implying this buffer is complete
6814 	 * and is an intermediate buffer of this received frame.
6815 */
6816
6817 status = info->rx_buffer_list[CurrentIndex].status;
6818
6819 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6820 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6821 if ( status & RXSTATUS_SHORT_FRAME )
6822 info->icount.rxshort++;
6823 else if ( status & RXSTATUS_ABORT )
6824 info->icount.rxabort++;
6825 else if ( status & RXSTATUS_OVERRUN )
6826 info->icount.rxover++;
6827 else
6828 info->icount.rxcrc++;
6829 framesize = 0;
6830 } else {
6831 /*
6832 * A receive frame is available, get frame size and status.
6833 *
6834 * The frame size is the starting value of the RCC (which was
6835 * set to 0xffff) minus the ending value of the RCC (decremented
6836 * once for each receive character) minus 2 or 4 for the 16-bit
6837 * or 32-bit CRC.
6838 *
6839 * If the status field is zero, this is an intermediate buffer.
6840 		 * Its size is 4K.
6841 *
6842 * If the DMA Buffer Entry's Status field is non-zero, the
6843 * receive operation completed normally (ie: DCD dropped). The
6844 * RCC field is valid and holds the received frame size.
6845 * It is possible that the RCC field will be zero on a DMA buffer
6846 * entry with a non-zero status. This can occur if the total
6847 * frame size (number of bytes between the time DCD goes active
6848 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6849 * case the 16C32 has underrun on the RCC count and appears to
6850 * stop updating this counter to let us know the actual received
6851 * frame size. If this happens (non-zero status and zero RCC),
6852 * simply return the entire RxDMA Buffer
6853 */
6854 if ( status ) {
6855 /*
6856 * In the event that the final RxDMA Buffer is
6857 * terminated with a non-zero status and the RCC
6858 * field is zero, we interpret this as the RCC
6859 * having underflowed (received frame > 65535 bytes).
6860 *
6861 * Signal the event to the user by passing back
6862 			 * a status of RxStatus_CrcError, returning the full
6863 			 * buffer, and letting the app figure out what data is
6864 			 * actually valid.
6865 */
6866 if ( info->rx_buffer_list[CurrentIndex].rcc )
6867 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6868 else
6869 framesize = DMABUFFERSIZE;
6870 }
6871 else
6872 framesize = DMABUFFERSIZE;
6873 }
6874
6875 if ( framesize > DMABUFFERSIZE ) {
6876 /*
6877 * if running in raw sync mode, ISR handler for
6878 * End Of Buffer events terminates all buffers at 4K.
6879 * If this frame size is said to be >4K, get the
6880 * actual number of bytes of the frame in this buffer.
6881 */
6882 framesize = framesize % DMABUFFERSIZE;
6883 }
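		/*
		 * Worked example (editorial addition, assuming the 4K
		 * DMABUFFERSIZE described above): an RCC-reported frame of
		 * 9000 bytes leaves 9000 % 4096 = 808 bytes in this final
		 * buffer.
		 */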
6884
6885
6886 if ( debug_level >= DEBUG_LEVEL_BH )
6887 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6888 __FILE__,__LINE__,info->device_name,status,framesize);
6889
6890 if ( debug_level >= DEBUG_LEVEL_DATA )
6891 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6892 min_t(int, framesize, DMABUFFERSIZE),0);
6893
6894 if (framesize) {
6895 /* copy dma buffer(s) to contiguous intermediate buffer */
6896 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6897
6898 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6899 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6900 info->icount.rxok++;
6901
6902 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6903 }
6904
6905 /* Free the buffers used by this frame. */
6906 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6907
6908 ReturnCode = 1;
6909 }
6910
6911
6912 if ( info->rx_enabled && info->rx_overflow ) {
6913 	/* The receiver needs to be restarted because of
6914 	 * a receive overflow (buffer or FIFO). If the
6915 	 * receive buffers are now empty, then restart the receiver.
6916 */
6917
6918 if ( !info->rx_buffer_list[CurrentIndex].status &&
6919 info->rx_buffer_list[CurrentIndex].count ) {
6920 spin_lock_irqsave(&info->irq_spinlock,flags);
6921 usc_start_receiver(info);
6922 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6923 }
6924 }
6925
6926 return ReturnCode;
6927
6928 } /* end of mgsl_get_raw_rx_frame() */
6929
6930 /* mgsl_load_tx_dma_buffer()
6931 *
6932 * Load the transmit DMA buffer with the specified data.
6933 *
6934 * Arguments:
6935 *
6936 * info pointer to device extension
6937 * Buffer pointer to buffer containing frame to load
6938 * BufferSize size in bytes of frame in Buffer
6939 *
6940 * Return Value: None
6941 */
6942 static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6943 const char *Buffer, unsigned int BufferSize)
6944 {
6945 unsigned short Copycount;
6946 unsigned int i = 0;
6947 DMABUFFERENTRY *pBufEntry;
6948
6949 if ( debug_level >= DEBUG_LEVEL_DATA )
6950 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6951
6952 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6953 /* set CMR:13 to start transmit when
6954 * next GoAhead (abort) is received
6955 */
6956 info->cmr_value |= BIT13;
6957 }
6958
6959 /* begin loading the frame in the next available tx dma
6960 * buffer, remember it's starting location for setting
6961 * up tx dma operation
6962 */
6963 i = info->current_tx_buffer;
6964 info->start_tx_dma_buffer = i;
6965
6966 /* Setup the status and RCC (Frame Size) fields of the 1st */
6967 /* buffer entry in the transmit DMA buffer list. */
6968
6969 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6970 info->tx_buffer_list[i].rcc = BufferSize;
6971 info->tx_buffer_list[i].count = BufferSize;
6972
6973 /* Copy frame data from 1st source buffer to the DMA buffers. */
6974 /* The frame data may span multiple DMA buffers. */
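	/* Illustrative example (editorial addition): assuming a 4K     */
	/* DMABUFFERSIZE, a 10000 byte frame occupies three entries     */
	/* holding 4096, 4096 and 1808 bytes respectively.              */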
6975
6976 while( BufferSize ){
6977 /* Get a pointer to next DMA buffer entry. */
6978 pBufEntry = &info->tx_buffer_list[i++];
6979
6980 if ( i == info->tx_buffer_count )
6981 i=0;
6982
6983 /* Calculate the number of bytes that can be copied from */
6984 /* the source buffer to this DMA buffer. */
6985 if ( BufferSize > DMABUFFERSIZE )
6986 Copycount = DMABUFFERSIZE;
6987 else
6988 Copycount = BufferSize;
6989
6990 /* Actually copy data from source buffer to DMA buffer. */
6991 /* Also set the data count for this individual DMA buffer. */
6992 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6993 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6994 else
6995 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6996
6997 pBufEntry->count = Copycount;
6998
6999 /* Advance source pointer and reduce remaining data count. */
7000 Buffer += Copycount;
7001 BufferSize -= Copycount;
7002
7003 ++info->tx_dma_buffers_used;
7004 }
7005
7006 /* remember next available tx dma buffer */
7007 info->current_tx_buffer = i;
7008
7009 } /* end of mgsl_load_tx_dma_buffer() */
7010
7011 /*
7012 * mgsl_register_test()
7013 *
7014 * Performs a register test of the 16C32.
7015 *
7016 * Arguments: info pointer to device instance data
7017 * Return Value: TRUE if test passed, otherwise FALSE
7018 */
7019 static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7020 {
7021 static unsigned short BitPatterns[] =
7022 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7023 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
7024 unsigned int i;
7025 BOOLEAN rc = TRUE;
7026 unsigned long flags;
7027
7028 spin_lock_irqsave(&info->irq_spinlock,flags);
7029 usc_reset(info);
7030
7031 /* Verify the reset state of some registers. */
7032
7033 if ( (usc_InReg( info, SICR ) != 0) ||
7034 (usc_InReg( info, IVR ) != 0) ||
7035 (usc_InDmaReg( info, DIVR ) != 0) ){
7036 rc = FALSE;
7037 }
7038
7039 if ( rc == TRUE ){
7040 /* Write bit patterns to various registers but do it out of */
7041 /* sync, then read back and verify values. */
7042
7043 for ( i = 0 ; i < Patterncount ; i++ ) {
7044 usc_OutReg( info, TC0R, BitPatterns[i] );
7045 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7046 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7047 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7048 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7049 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7050
7051 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7052 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7053 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7054 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7055 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7056 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7057 rc = FALSE;
7058 break;
7059 }
7060 }
7061 }
7062
7063 usc_reset(info);
7064 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7065
7066 return rc;
7067
7068 } /* end of mgsl_register_test() */
7069
7070 /* mgsl_irq_test() Perform interrupt test of the 16C32.
7071 *
7072 * Arguments: info pointer to device instance data
7073 * Return Value: TRUE if test passed, otherwise FALSE
7074 */
7075 static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7076 {
7077 unsigned long EndTime;
7078 unsigned long flags;
7079
7080 spin_lock_irqsave(&info->irq_spinlock,flags);
7081 usc_reset(info);
7082
7083 /*
7084 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7085 * The ISR sets irq_occurred to 1.
7086 */
7087
7088 info->irq_occurred = FALSE;
7089
7090 	/* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7092 /* This connects the IRQ request signal to the ISA bus */
7093 /* on the ISA adapter. This has no effect for the PCI adapter */
7094 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7095
7096 usc_EnableMasterIrqBit(info);
7097 usc_EnableInterrupts(info, IO_PIN);
7098 usc_ClearIrqPendingBits(info, IO_PIN);
7099
7100 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7101 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7102
7103 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7104
7105 EndTime=100;
7106 while( EndTime-- && !info->irq_occurred ) {
7107 msleep_interruptible(10);
7108 }
7109
7110 spin_lock_irqsave(&info->irq_spinlock,flags);
7111 usc_reset(info);
7112 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7113
7114 if ( !info->irq_occurred )
7115 return FALSE;
7116 else
7117 return TRUE;
7118
7119 } /* end of mgsl_irq_test() */
7120
7121 /* mgsl_dma_test()
7122 *
7123 * Perform a DMA test of the 16C32. A small frame is
7124 * transmitted via DMA from a transmit buffer to a receive buffer
7125 * using single buffer DMA mode.
7126 *
7127 * Arguments: info pointer to device instance data
7128 * Return Value: TRUE if test passed, otherwise FALSE
7129 */
7130 static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7131 {
7132 unsigned short FifoLevel;
7133 unsigned long phys_addr;
7134 unsigned int FrameSize;
7135 unsigned int i;
7136 char *TmpPtr;
7137 BOOLEAN rc = TRUE;
7138 unsigned short status=0;
7139 unsigned long EndTime;
7140 unsigned long flags;
7141 MGSL_PARAMS tmp_params;
7142
7143 /* save current port options */
7144 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7145 /* load default port options */
7146 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7147
7148 #define TESTFRAMESIZE 40
7149
7150 spin_lock_irqsave(&info->irq_spinlock,flags);
7151
7152 /* setup 16C32 for SDLC DMA transfer mode */
7153
7154 usc_reset(info);
7155 usc_set_sdlc_mode(info);
7156 usc_enable_loopback(info,1);
7157
7158 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7159 * field of the buffer entry after fetching buffer address. This
7160 * way we can detect a DMA failure for a DMA read (which should be
7161 * non-destructive to system memory) before we try and write to
7162 * memory (where a failure could corrupt system memory).
7163 */
7164
7165 /* Receive DMA mode Register (RDMR)
7166 *
7167 * <15..14> 11 DMA mode = Linked List Buffer mode
7168 * <13> 1 RSBinA/L = store Rx status Block in List entry
7169 * <12> 0 1 = Clear count of List Entry after fetching
7170 * <11..10> 00 Address mode = Increment
7171 * <9> 1 Terminate Buffer on RxBound
7172 * <8> 0 Bus Width = 16bits
7173 * <7..0> ? status Bits (write as 0s)
7174 *
7175 * 1110 0010 0000 0000 = 0xe200
7176 */
7177
7178 usc_OutDmaReg( info, RDMR, 0xe200 );
7179
7180 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7181
7182
7183 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7184
7185 FrameSize = TESTFRAMESIZE;
7186
7187 /* setup 1st transmit buffer entry: */
7188 /* with frame size and transmit control word */
7189
7190 info->tx_buffer_list[0].count = FrameSize;
7191 info->tx_buffer_list[0].rcc = FrameSize;
7192 info->tx_buffer_list[0].status = 0x4000;
7193
7194 /* build a transmit frame in 1st transmit DMA buffer */
7195
7196 TmpPtr = info->tx_buffer_list[0].virt_addr;
7197 for (i = 0; i < FrameSize; i++ )
7198 *TmpPtr++ = i;
7199
7200 /* setup 1st receive buffer entry: */
7201 /* clear status, set max receive buffer size */
7202
7203 info->rx_buffer_list[0].status = 0;
7204 info->rx_buffer_list[0].count = FrameSize + 4;
7205
7206 /* zero out the 1st receive buffer */
7207
7208 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7209
7210 /* Set count field of next buffer entries to prevent */
7211 /* 16C32 from using buffers after the 1st one. */
7212
7213 info->tx_buffer_list[1].count = 0;
7214 info->rx_buffer_list[1].count = 0;
7215
7216
7217 /***************************/
7218 /* Program 16C32 receiver. */
7219 /***************************/
7220
7221 spin_lock_irqsave(&info->irq_spinlock,flags);
7222
7223 /* setup DMA transfers */
7224 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7225
7226 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7227 phys_addr = info->rx_buffer_list[0].phys_entry;
7228 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7229 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7230
7231 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7232 usc_InDmaReg( info, RDMR );
7233 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7234
7235 /* Enable Receiver (RMR <1..0> = 10) */
7236 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7237
7238 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7239
7240
7241 /*************************************************************/
7242 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7243 /*************************************************************/
7244
7245 /* Wait 100ms for interrupt. */
7246 EndTime = jiffies + msecs_to_jiffies(100);
7247
7248 for(;;) {
7249 if (time_after(jiffies, EndTime)) {
7250 rc = FALSE;
7251 break;
7252 }
7253
7254 spin_lock_irqsave(&info->irq_spinlock,flags);
7255 status = usc_InDmaReg( info, RDMR );
7256 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7257
7258 if ( !(status & BIT4) && (status & BIT5) ) {
7259 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7260 /* BUSY (BIT 5) is active (channel still active). */
7261 /* This means the buffer entry read has completed. */
7262 break;
7263 }
7264 }
7265
7266
7267 /******************************/
7268 /* Program 16C32 transmitter. */
7269 /******************************/
7270
7271 spin_lock_irqsave(&info->irq_spinlock,flags);
7272
7273 /* Program the Transmit Character Length Register (TCLR) */
7274 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7275
7276 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7277 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7278
7279 /* Program the address of the 1st DMA Buffer Entry in linked list */
7280
7281 phys_addr = info->tx_buffer_list[0].phys_entry;
7282 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7283 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7284
7285 /* unlatch Tx status bits, and start transmit channel. */
7286
7287 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7288 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7289
7290 /* wait for DMA controller to fill transmit FIFO */
7291
7292 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7293
7294 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7295
7296
7297 /**********************************/
7298 /* WAIT FOR TRANSMIT FIFO TO FILL */
7299 /**********************************/
7300
7301 /* Wait 100ms */
7302 EndTime = jiffies + msecs_to_jiffies(100);
7303
7304 for(;;) {
7305 if (time_after(jiffies, EndTime)) {
7306 rc = FALSE;
7307 break;
7308 }
7309
7310 spin_lock_irqsave(&info->irq_spinlock,flags);
7311 FifoLevel = usc_InReg(info, TICR) >> 8;
7312 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7313
7314 if ( FifoLevel < 16 )
7315 break;
7316 else
7317 if ( FrameSize < 32 ) {
7318 /* This frame is smaller than the entire transmit FIFO */
7319 /* so wait for the entire frame to be loaded. */
7320 if ( FifoLevel <= (32 - FrameSize) )
7321 break;
7322 }
7323 }
7324
7325
7326 if ( rc == TRUE )
7327 {
7328 /* Enable 16C32 transmitter. */
7329
7330 spin_lock_irqsave(&info->irq_spinlock,flags);
7331
7332 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7333 usc_TCmd( info, TCmd_SendFrame );
7334 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7335
7336 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7337
7338
7339 /******************************/
7340 /* WAIT FOR TRANSMIT COMPLETE */
7341 /******************************/
7342
7343 /* Wait 100ms */
7344 EndTime = jiffies + msecs_to_jiffies(100);
7345
7346 /* While timer not expired wait for transmit complete */
7347
7348 spin_lock_irqsave(&info->irq_spinlock,flags);
7349 status = usc_InReg( info, TCSR );
7350 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7351
7352 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7353 if (time_after(jiffies, EndTime)) {
7354 rc = FALSE;
7355 break;
7356 }
7357
7358 spin_lock_irqsave(&info->irq_spinlock,flags);
7359 status = usc_InReg( info, TCSR );
7360 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7361 }
7362 }
7363
7364
7365 if ( rc == TRUE ){
7366 /* CHECK FOR TRANSMIT ERRORS */
7367 if ( status & (BIT5 + BIT1) )
7368 rc = FALSE;
7369 }
7370
7371 if ( rc == TRUE ) {
7372 /* WAIT FOR RECEIVE COMPLETE */
7373
7374 /* Wait 100ms */
7375 EndTime = jiffies + msecs_to_jiffies(100);
7376
7377 /* Wait for 16C32 to write receive status to buffer entry. */
7378 status=info->rx_buffer_list[0].status;
7379 while ( status == 0 ) {
7380 if (time_after(jiffies, EndTime)) {
7381 rc = FALSE;
7382 break;
7383 }
7384 status=info->rx_buffer_list[0].status;
7385 }
7386 }
7387
7388
7389 if ( rc == TRUE ) {
7390 /* CHECK FOR RECEIVE ERRORS */
7391 status = info->rx_buffer_list[0].status;
7392
7393 if ( status & (BIT8 + BIT3 + BIT1) ) {
7394 /* receive error has occurred */
7395 rc = FALSE;
7396 } else {
7397 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7398 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7399 rc = FALSE;
7400 }
7401 }
7402 }
7403
7404 spin_lock_irqsave(&info->irq_spinlock,flags);
7405 usc_reset( info );
7406 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7407
7408 /* restore current port options */
7409 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7410
7411 return rc;
7412
7413 } /* end of mgsl_dma_test() */
7414
7415 /* mgsl_adapter_test()
7416 *
7417 * Perform the register, IRQ, and DMA tests for the 16C32.
7418 *
7419 * Arguments: info pointer to device instance data
7420 * Return Value: 0 if success, otherwise -ENODEV
7421 */
7422 static int mgsl_adapter_test( struct mgsl_struct *info )
7423 {
7424 if ( debug_level >= DEBUG_LEVEL_INFO )
7425 printk( "%s(%d):Testing device %s\n",
7426 __FILE__,__LINE__,info->device_name );
7427
7428 if ( !mgsl_register_test( info ) ) {
7429 info->init_error = DiagStatus_AddressFailure;
7430 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7431 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7432 return -ENODEV;
7433 }
7434
7435 if ( !mgsl_irq_test( info ) ) {
7436 info->init_error = DiagStatus_IrqFailure;
7437 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7438 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7439 return -ENODEV;
7440 }
7441
7442 if ( !mgsl_dma_test( info ) ) {
7443 info->init_error = DiagStatus_DmaFailure;
7444 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7445 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7446 return -ENODEV;
7447 }
7448
7449 if ( debug_level >= DEBUG_LEVEL_INFO )
7450 printk( "%s(%d):device %s passed diagnostics\n",
7451 __FILE__,__LINE__,info->device_name );
7452
7453 return 0;
7454
7455 } /* end of mgsl_adapter_test() */
7456
7457 /* mgsl_memory_test()
7458 *
7459 * Test the shared memory on a PCI adapter.
7460 *
7461 * Arguments: info pointer to device instance data
7462 * Return Value: TRUE if test passed, otherwise FALSE
7463 */
7464 static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7465 {
7466 static unsigned long BitPatterns[] =
7467 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7468 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7469 unsigned long i;
7470 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7471 unsigned long * TestAddr;
7472
7473 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7474 return TRUE;
7475
7476 TestAddr = (unsigned long *)info->memory_base;
7477
7478 /* Test data lines with test pattern at one location. */
7479
7480 for ( i = 0 ; i < Patterncount ; i++ ) {
7481 *TestAddr = BitPatterns[i];
7482 if ( *TestAddr != BitPatterns[i] )
7483 return FALSE;
7484 }
7485
7486 /* Test address lines with incrementing pattern over */
7487 /* entire address range. */
7488
7489 for ( i = 0 ; i < TestLimit ; i++ ) {
7490 *TestAddr = i * 4;
7491 TestAddr++;
7492 }
7493
7494 TestAddr = (unsigned long *)info->memory_base;
7495
7496 for ( i = 0 ; i < TestLimit ; i++ ) {
7497 if ( *TestAddr != i * 4 )
7498 return FALSE;
7499 TestAddr++;
7500 }
7501
7502 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7503
7504 return TRUE;
7505
7506 } /* End Of mgsl_memory_test() */
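/*
 * Note on the address line test above (illustrative reasoning): each
 * 32 bit word is written with its own byte offset (i * 4), so any two
 * address lines that are shorted or stuck cause two different offsets
 * to alias to the same physical word, and the readback pass then finds
 * a value that no longer matches i * 4.
 */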
7507
7508
7509 /* mgsl_load_pci_memory()
7510 *
7511 * Load a large block of data into the PCI shared memory.
7512 * Use this instead of memcpy() or memmove() to move data
7513 * into the PCI shared memory.
7514 *
7515 * Notes:
7516 *
7517 * This function prevents the PCI9050 interface chip from hogging
7518 * the adapter local bus, which can starve the 16C32 by preventing
7519 * 16C32 bus master cycles.
7520 *
7521 * The PCI9050 documentation says that the 9050 will always release
7522 * control of the local bus after completing the current read
7523 * or write operation.
7524 *
7525 * It appears that as long as the PCI9050 write FIFO is full, the
7526 * PCI9050 treats all of the writes as a single burst transaction
7527 * and will not release the bus. This causes DMA latency problems
7528 * at high speeds when copying large data blocks to the shared
7529 * memory.
7530 *
7531  * This function, in effect, breaks a large shared memory write
7532  * into multiple transactions by interleaving a shared memory read
7533  * which will flush the write FIFO and 'complete' the write
7534  * transaction. This allows any pending DMA request to gain control
7535  * of the local bus in a timely fashion.
7536 *
7537 * Arguments:
7538 *
7539 * TargetPtr pointer to target address in PCI shared memory
7540 * SourcePtr pointer to source buffer for data
7541 * count count in bytes of data to copy
7542 *
7543 * Return Value: None
7544 */
7545 static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7546 unsigned short count )
7547 {
7548 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7549 #define PCI_LOAD_INTERVAL 64
7550
7551 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7552 unsigned short Index;
7553 unsigned long Dummy;
7554
7555 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7556 {
7557 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7558 Dummy = *((volatile unsigned long *)TargetPtr);
7559 TargetPtr += PCI_LOAD_INTERVAL;
7560 SourcePtr += PCI_LOAD_INTERVAL;
7561 }
7562
7563 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7564
7565 } /* End Of mgsl_load_pci_memory() */
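/*
 * Worked example (illustrative only): for a hypothetical count of 200
 * bytes, Intervalcount = 200 / 64 = 3, so three 64 byte chunks are
 * copied, each followed by one volatile readback that flushes the
 * PCI9050 write FIFO and ends the burst, and the final memcpy() moves
 * the remaining 200 % 64 = 8 bytes.
 */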
7566
7567 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7568 {
7569 int i;
7570 int linecount;
7571 if (xmit)
7572 printk("%s tx data:\n",info->device_name);
7573 else
7574 printk("%s rx data:\n",info->device_name);
7575
7576 while(count) {
7577 if (count > 16)
7578 linecount = 16;
7579 else
7580 linecount = count;
7581
7582 for(i=0;i<linecount;i++)
7583 printk("%02X ",(unsigned char)data[i]);
7584 for(;i<17;i++)
7585 printk(" ");
7586 for(i=0;i<linecount;i++) {
7587 if (data[i]>=040 && data[i]<=0176)
7588 printk("%c",data[i]);
7589 else
7590 printk(".");
7591 }
7592 printk("\n");
7593
7594 data += linecount;
7595 count -= linecount;
7596 }
7597 } /* end of mgsl_trace_block() */
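/*
 * Example output (illustrative, for a hypothetical 4 byte tx buffer
 * containing the ASCII text "HDLC" on a hypothetical device named
 * ttySL0):
 *
 *   ttySL0 tx data:
 *   48 44 4C 43                                        HDLC
 *
 * Each line dumps up to 16 bytes as hex, pads the hex field to a fixed
 * width, then shows the printable ASCII form with '.' substituted for
 * bytes outside the 0x20-0x7E range.
 */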
7598
7599 /* mgsl_tx_timeout()
7600 *
7601 * called when HDLC frame times out
7602 * update stats and do tx completion processing
7603 *
7604 * Arguments: context pointer to device instance data
7605 * Return Value: None
7606 */
7607 static void mgsl_tx_timeout(unsigned long context)
7608 {
7609 struct mgsl_struct *info = (struct mgsl_struct*)context;
7610 unsigned long flags;
7611
7612 if ( debug_level >= DEBUG_LEVEL_INFO )
7613 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7614 __FILE__,__LINE__,info->device_name);
7615 if(info->tx_active &&
7616 (info->params.mode == MGSL_MODE_HDLC ||
7617 info->params.mode == MGSL_MODE_RAW) ) {
7618 info->icount.txtimeout++;
7619 }
7620 spin_lock_irqsave(&info->irq_spinlock,flags);
7621 info->tx_active = 0;
7622 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7623
7624 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7625 usc_loopmode_cancel_transmit( info );
7626
7627 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7628
7629 #ifdef CONFIG_HDLC
7630 if (info->netcount)
7631 hdlcdev_tx_done(info);
7632 else
7633 #endif
7634 mgsl_bh_transmit(info);
7635
7636 } /* end of mgsl_tx_timeout() */
7637
7638 /* signal that there are no more frames to send, so that the
7639  * line is 'released' by echoing RxD to TxD when the current
7640  * transmission is complete (or immediately if no tx is in progress).
7641 */
7642 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7643 {
7644 unsigned long flags;
7645
7646 spin_lock_irqsave(&info->irq_spinlock,flags);
7647 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7648 if (info->tx_active)
7649 info->loopmode_send_done_requested = TRUE;
7650 else
7651 usc_loopmode_send_done(info);
7652 }
7653 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7654
7655 return 0;
7656 }
7657
7658 /* release the line by echoing RxD to TxD
7659 * upon completion of a transmit frame
7660 */
7661 static void usc_loopmode_send_done( struct mgsl_struct * info )
7662 {
7663 info->loopmode_send_done_requested = FALSE;
7664 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7665 info->cmr_value &= ~BIT13;
7666 usc_OutReg(info, CMR, info->cmr_value);
7667 }
7668
7669 /* abort a transmit in progress while in HDLC LoopMode
7670 */
7671 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7672 {
7673 /* reset tx dma channel and purge TxFifo */
7674 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7675 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7676 usc_loopmode_send_done( info );
7677 }
7678
7679 /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7680 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7681 * we must clear CMR:13 to begin repeating TxData to RxData
7682 */
7683 static void usc_loopmode_insert_request( struct mgsl_struct * info )
7684 {
7685 info->loopmode_insert_requested = TRUE;
7686
7687 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7688 * begin repeating TxData on RxData (complete insertion)
7689 */
7690 usc_OutReg( info, RICR,
7691 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7692
7693 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7694 info->cmr_value |= BIT13;
7695 usc_OutReg(info, CMR, info->cmr_value);
7696 }
7697
7698 /* return 1 if station is inserted into the loop, otherwise 0
7699 */
7700 static int usc_loopmode_active( struct mgsl_struct * info)
7701 {
7702 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7703 }
7704
7705 #ifdef CONFIG_HDLC
7706
7707 /**
7708 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7709 * set encoding and frame check sequence (FCS) options
7710 *
7711 * dev pointer to network device structure
7712 * encoding serial encoding setting
7713 * parity FCS setting
7714 *
7715 * returns 0 if success, otherwise error code
7716 */
7717 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7718 unsigned short parity)
7719 {
7720 struct mgsl_struct *info = dev_to_port(dev);
7721 unsigned char new_encoding;
7722 unsigned short new_crctype;
7723
7724 /* return error if TTY interface open */
7725 if (info->count)
7726 return -EBUSY;
7727
7728 switch (encoding)
7729 {
7730 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7731 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7732 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7733 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7734 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7735 default: return -EINVAL;
7736 }
7737
7738 switch (parity)
7739 {
7740 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7741 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7742 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7743 default: return -EINVAL;
7744 }
7745
7746 info->params.encoding = new_encoding;
7747 info->params.crc_type = new_crctype;
7748
7749 /* if network interface up, reprogram hardware */
7750 if (info->netcount)
7751 mgsl_program_hw(info);
7752
7753 return 0;
7754 }
7755
7756 /**
7757 * called by generic HDLC layer to send frame
7758 *
7759 * skb socket buffer containing HDLC frame
7760 * dev pointer to network device structure
7761 *
7762 * returns 0 if success, otherwise error code
7763 */
7764 static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7765 {
7766 struct mgsl_struct *info = dev_to_port(dev);
7767 struct net_device_stats *stats = hdlc_stats(dev);
7768 unsigned long flags;
7769
7770 if (debug_level >= DEBUG_LEVEL_INFO)
7771 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7772
7773 /* stop sending until this frame completes */
7774 netif_stop_queue(dev);
7775
7776 /* copy data to device buffers */
7777 info->xmit_cnt = skb->len;
7778 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7779
7780 /* update network statistics */
7781 stats->tx_packets++;
7782 stats->tx_bytes += skb->len;
7783
7784 /* done with socket buffer, so free it */
7785 dev_kfree_skb(skb);
7786
7787 /* save start time for transmit timeout detection */
7788 dev->trans_start = jiffies;
7789
7790 /* start hardware transmitter if necessary */
7791 spin_lock_irqsave(&info->irq_spinlock,flags);
7792 if (!info->tx_active)
7793 usc_start_transmitter(info);
7794 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7795
7796 return 0;
7797 }
7798
7799 /**
7800 * called by network layer when interface enabled
7801 * claim resources and initialize hardware
7802 *
7803 * dev pointer to network device structure
7804 *
7805 * returns 0 if success, otherwise error code
7806 */
7807 static int hdlcdev_open(struct net_device *dev)
7808 {
7809 struct mgsl_struct *info = dev_to_port(dev);
7810 int rc;
7811 unsigned long flags;
7812
7813 if (debug_level >= DEBUG_LEVEL_INFO)
7814 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7815
7816 /* generic HDLC layer open processing */
7817 if ((rc = hdlc_open(dev)))
7818 return rc;
7819
7820 /* arbitrate between network and tty opens */
7821 spin_lock_irqsave(&info->netlock, flags);
7822 if (info->count != 0 || info->netcount != 0) {
7823 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7824 spin_unlock_irqrestore(&info->netlock, flags);
7825 return -EBUSY;
7826 }
7827 info->netcount=1;
7828 spin_unlock_irqrestore(&info->netlock, flags);
7829
7830 /* claim resources and init adapter */
7831 if ((rc = startup(info)) != 0) {
7832 spin_lock_irqsave(&info->netlock, flags);
7833 info->netcount=0;
7834 spin_unlock_irqrestore(&info->netlock, flags);
7835 return rc;
7836 }
7837
7838 /* assert DTR and RTS, apply hardware settings */
7839 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7840 mgsl_program_hw(info);
7841
7842 /* enable network layer transmit */
7843 dev->trans_start = jiffies;
7844 netif_start_queue(dev);
7845
7846 /* inform generic HDLC layer of current DCD status */
7847 spin_lock_irqsave(&info->irq_spinlock, flags);
7848 usc_get_serial_signals(info);
7849 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7850 if (info->serial_signals & SerialSignal_DCD)
7851 netif_carrier_on(dev);
7852 else
7853 netif_carrier_off(dev);
7854 return 0;
7855 }
7856
7857 /**
7858 * called by network layer when interface is disabled
7859 * shutdown hardware and release resources
7860 *
7861 * dev pointer to network device structure
7862 *
7863 * returns 0 if success, otherwise error code
7864 */
7865 static int hdlcdev_close(struct net_device *dev)
7866 {
7867 struct mgsl_struct *info = dev_to_port(dev);
7868 unsigned long flags;
7869
7870 if (debug_level >= DEBUG_LEVEL_INFO)
7871 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7872
7873 netif_stop_queue(dev);
7874
7875 /* shutdown adapter and release resources */
7876 shutdown(info);
7877
7878 hdlc_close(dev);
7879
7880 spin_lock_irqsave(&info->netlock, flags);
7881 info->netcount=0;
7882 spin_unlock_irqrestore(&info->netlock, flags);
7883
7884 return 0;
7885 }
7886
7887 /**
7888 * called by network layer to process IOCTL call to network device
7889 *
7890 * dev pointer to network device structure
7891 * ifr pointer to network interface request structure
7892 * cmd IOCTL command code
7893 *
7894 * returns 0 if success, otherwise error code
7895 */
7896 static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7897 {
7898 const size_t size = sizeof(sync_serial_settings);
7899 sync_serial_settings new_line;
7900 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7901 struct mgsl_struct *info = dev_to_port(dev);
7902 unsigned int flags;
7903
7904 if (debug_level >= DEBUG_LEVEL_INFO)
7905 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7906
7907 /* return error if TTY interface open */
7908 if (info->count)
7909 return -EBUSY;
7910
7911 if (cmd != SIOCWANDEV)
7912 return hdlc_ioctl(dev, ifr, cmd);
7913
7914 switch(ifr->ifr_settings.type) {
7915 case IF_GET_IFACE: /* return current sync_serial_settings */
7916
7917 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7918 if (ifr->ifr_settings.size < size) {
7919 ifr->ifr_settings.size = size; /* data size wanted */
7920 return -ENOBUFS;
7921 }
7922
7923 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7924 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7925 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7926 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7927
7928 switch (flags){
7929 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7930 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7931 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7932 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7933 default: new_line.clock_type = CLOCK_DEFAULT;
7934 }
7935
7936 new_line.clock_rate = info->params.clock_speed;
7937 new_line.loopback = info->params.loopback ? 1:0;
7938
7939 if (copy_to_user(line, &new_line, size))
7940 return -EFAULT;
7941 return 0;
7942
7943 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7944
7945 if(!capable(CAP_NET_ADMIN))
7946 return -EPERM;
7947 if (copy_from_user(&new_line, line, size))
7948 return -EFAULT;
7949
7950 switch (new_line.clock_type)
7951 {
7952 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7953 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7954 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7955 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7956 case CLOCK_DEFAULT: flags = info->params.flags &
7957 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7958 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7959 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7960 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7961 default: return -EINVAL;
7962 }
7963
7964 if (new_line.loopback != 0 && new_line.loopback != 1)
7965 return -EINVAL;
7966
7967 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7968 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7969 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7970 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7971 info->params.flags |= flags;
7972
7973 info->params.loopback = new_line.loopback;
7974
7975 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7976 info->params.clock_speed = new_line.clock_rate;
7977 else
7978 info->params.clock_speed = 0;
7979
7980 /* if network interface up, reprogram hardware */
7981 if (info->netcount)
7982 mgsl_program_hw(info);
7983 return 0;
7984
7985 default:
7986 return hdlc_ioctl(dev, ifr, cmd);
7987 }
7988 }
7989
7990 /**
7991 * called by network layer when transmit timeout is detected
7992 *
7993 * dev pointer to network device structure
7994 */
7995 static void hdlcdev_tx_timeout(struct net_device *dev)
7996 {
7997 struct mgsl_struct *info = dev_to_port(dev);
7998 struct net_device_stats *stats = hdlc_stats(dev);
7999 unsigned long flags;
8000
8001 if (debug_level >= DEBUG_LEVEL_INFO)
8002 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
8003
8004 stats->tx_errors++;
8005 stats->tx_aborted_errors++;
8006
8007 spin_lock_irqsave(&info->irq_spinlock,flags);
8008 usc_stop_transmitter(info);
8009 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8010
8011 netif_wake_queue(dev);
8012 }
8013
8014 /**
8015 * called by device driver when transmit completes
8016 * reenable network layer transmit if stopped
8017 *
8018 * info pointer to device instance information
8019 */
8020 static void hdlcdev_tx_done(struct mgsl_struct *info)
8021 {
8022 if (netif_queue_stopped(info->netdev))
8023 netif_wake_queue(info->netdev);
8024 }
8025
8026 /**
8027 * called by device driver when frame received
8028 * pass frame to network layer
8029 *
8030 * info pointer to device instance information
8031  * buf  pointer to buffer containing frame data
8032 * size count of data bytes in buf
8033 */
8034 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8035 {
8036 struct sk_buff *skb = dev_alloc_skb(size);
8037 struct net_device *dev = info->netdev;
8038 struct net_device_stats *stats = hdlc_stats(dev);
8039
8040 if (debug_level >= DEBUG_LEVEL_INFO)
8041 printk("hdlcdev_rx(%s)\n",dev->name);
8042
8043 if (skb == NULL) {
8044 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8045 stats->rx_dropped++;
8046 return;
8047 }
8048
8049 memcpy(skb_put(skb, size),buf,size);
8050
8051 skb->protocol = hdlc_type_trans(skb, info->netdev);
8052
8053 stats->rx_packets++;
8054 stats->rx_bytes += size;
8055
8056 netif_rx(skb);
8057
8058 info->netdev->last_rx = jiffies;
8059 }
8060
8061 /**
8062 * called by device driver when adding device instance
8063 * do generic HDLC initialization
8064 *
8065 * info pointer to device instance information
8066 *
8067 * returns 0 if success, otherwise error code
8068 */
8069 static int hdlcdev_init(struct mgsl_struct *info)
8070 {
8071 int rc;
8072 struct net_device *dev;
8073 hdlc_device *hdlc;
8074
8075 /* allocate and initialize network and HDLC layer objects */
8076
8077 if (!(dev = alloc_hdlcdev(info))) {
8078 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8079 return -ENOMEM;
8080 }
8081
8082 /* for network layer reporting purposes only */
8083 dev->base_addr = info->io_base;
8084 dev->irq = info->irq_level;
8085 dev->dma = info->dma_level;
8086
8087 /* network layer callbacks and settings */
8088 dev->do_ioctl = hdlcdev_ioctl;
8089 dev->open = hdlcdev_open;
8090 dev->stop = hdlcdev_close;
8091 dev->tx_timeout = hdlcdev_tx_timeout;
8092 dev->watchdog_timeo = 10*HZ;
8093 dev->tx_queue_len = 50;
8094
8095 /* generic HDLC layer callbacks and settings */
8096 hdlc = dev_to_hdlc(dev);
8097 hdlc->attach = hdlcdev_attach;
8098 hdlc->xmit = hdlcdev_xmit;
8099
8100 /* register objects with HDLC layer */
8101 if ((rc = register_hdlc_device(dev))) {
8102 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8103 free_netdev(dev);
8104 return rc;
8105 }
8106
8107 info->netdev = dev;
8108 return 0;
8109 }
8110
8111 /**
8112 * called by device driver when removing device instance
8113 * do generic HDLC cleanup
8114 *
8115 * info pointer to device instance information
8116 */
8117 static void hdlcdev_exit(struct mgsl_struct *info)
8118 {
8119 unregister_hdlc_device(info->netdev);
8120 free_netdev(info->netdev);
8121 info->netdev = NULL;
8122 }
8123
8124 #endif /* CONFIG_HDLC */
8125
8126
8127 static int __devinit synclink_init_one (struct pci_dev *dev,
8128 const struct pci_device_id *ent)
8129 {
8130 struct mgsl_struct *info;
8131
8132 if (pci_enable_device(dev)) {
8133 printk("error enabling pci device %p\n", dev);
8134 return -EIO;
8135 }
8136
8137 if (!(info = mgsl_allocate_device())) {
8138 printk("can't allocate device instance data.\n");
8139 return -EIO;
8140 }
8141
8142 /* Copy user configuration info to device instance data */
8143
8144 info->io_base = pci_resource_start(dev, 2);
8145 info->irq_level = dev->irq;
8146 info->phys_memory_base = pci_resource_start(dev, 3);
8147
8148 	/* Because ioremap only works on page boundaries we must map
8149 * a larger area than is actually implemented for the LCR
8150 * memory range. We map a full page starting at the page boundary.
8151 */
8152 info->phys_lcr_base = pci_resource_start(dev, 0);
8153 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8154 info->phys_lcr_base &= ~(PAGE_SIZE-1);
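	/*
	 * Worked example (hypothetical addresses): if BAR0 reported
	 * 0xffaff080 and PAGE_SIZE is 4096, lcr_offset becomes 0x080 and
	 * phys_lcr_base is rounded down to 0xffaff000, so a single page
	 * sized mapping still covers the implemented LCR registers.
	 */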
8155
8156 info->bus_type = MGSL_BUS_TYPE_PCI;
8157 info->io_addr_size = 8;
8158 info->irq_flags = IRQF_SHARED;
8159
8160 if (dev->device == 0x0210) {
8161 /* Version 1 PCI9030 based universal PCI adapter */
8162 info->misc_ctrl_value = 0x007c4080;
8163 info->hw_version = 1;
8164 } else {
8165 /* Version 0 PCI9050 based 5V PCI adapter
8166 * A PCI9050 bug prevents reading LCR registers if
8167 * LCR base address bit 7 is set. Maintain shadow
8168 * value so we can write to LCR misc control reg.
8169 */
8170 info->misc_ctrl_value = 0x087e4546;
8171 info->hw_version = 0;
8172 }
8173
8174 mgsl_add_device(info);
8175
8176 return 0;
8177 }
8178
8179 static void __devexit synclink_remove_one (struct pci_dev *dev)
8180 {
8181 }
8182