/*
 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
 *
 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_hss"

#define PKT_EXTRA_FLAGS		0 /* orig 1 */
#define PKT_NUM_PIPES		1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW	4 /* total 4 dwords per HSS */

#define RX_DESCS		16 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT		1000 /* microseconds */
#define HSS_COUNT		2
#define FRAME_SIZE		256 /* doesn't matter at this point */
#define FRAME_OFFSET		0
#define MAX_CHANNELS		(FRAME_SIZE / 8)

#define NAPI_WEIGHT		16

/* Queue IDs */
#define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE	13	/* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE	14	/* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE	15
#define HSS0_PKT_TX2_QUEUE	16
#define HSS0_PKT_TX3_QUEUE	17
#define HSS0_PKT_RXFREE0_QUEUE	18	/* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE	19
#define HSS0_PKT_RXFREE2_QUEUE	20
#define HSS0_PKT_RXFREE3_QUEUE	21
#define HSS0_PKT_TXDONE_QUEUE	22	/* orig size = 64 dwords */

#define HSS1_CHL_RXTRIG_QUEUE	10
#define HSS1_PKT_RX_QUEUE	0
#define HSS1_PKT_TX0_QUEUE	5
#define HSS1_PKT_TX1_QUEUE	6
#define HSS1_PKT_TX2_QUEUE	7
#define HSS1_PKT_TX3_QUEUE	8
#define HSS1_PKT_RXFREE0_QUEUE	1
#define HSS1_PKT_RXFREE1_QUEUE	2
#define HSS1_PKT_RXFREE2_QUEUE	3
#define HSS1_PKT_RXFREE3_QUEUE	4
#define HSS1_PKT_TXDONE_QUEUE	9

#define NPE_PKT_MODE_HDLC		0
#define NPE_PKT_MODE_RAW		1
#define NPE_PKT_MODE_56KMODE		2
#define NPE_PKT_MODE_56KENDIAN_MSB	4

/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES	0x1 /* default = flags */
#define PKT_HDLC_CRC_32		0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN	0x4 /* default = LE */


/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH	0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE	0x80000000
#define PCR_FRM_SYNC_RISINGEDGE		0xC0000000

/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING	0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING	0x30000000

/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING		0x08000000
#define PCR_DCLK_EDGE_RISING		0x04000000

/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT		0x02000000

/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED		0x01000000

/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE		0x00200000

/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT	0x00100000

/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN			0x00080000

/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN		0x00040000

/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT			0x00020000

/* Drive data pins? */
#define PCR_TX_DATA_ENABLE		0x00010000

/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH		0x00002000
#define PCR_TX_V56K_HIGH_IMP		0x00004000

/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH		0x00000800
#define PCR_TX_UNASS_HIGH_IMP		0x00001000

/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP		0x00000400

/* 56k data endianness - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED	0x00000200

/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA		0x00000100

/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC		0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC	0x08000000

/* default = no loopback */
#define CCR_LOOPBACK			0x02000000

/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS			0x01000000


/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP	((0x3FF << 22) | (  2 << 12) |   15) /* 65 KHz */

#define CLK42X_SPEED_512KHZ	((  130 << 22) | (  2 << 12) |   15)
#define CLK42X_SPEED_1536KHZ	((   43 << 22) | ( 18 << 12) |   47)
#define CLK42X_SPEED_1544KHZ	((   43 << 22) | ( 33 << 12) |  192)
#define CLK42X_SPEED_2048KHZ	((   32 << 22) | ( 34 << 12) |   63)
#define CLK42X_SPEED_4096KHZ	((   16 << 22) | ( 34 << 12) |  127)
#define CLK42X_SPEED_8192KHZ	((    8 << 22) | ( 34 << 12) |  255)

#define CLK46X_SPEED_512KHZ	((  130 << 22) | ( 24 << 12) |  127)
#define CLK46X_SPEED_1536KHZ	((   43 << 22) | (152 << 12) |  383)
#define CLK46X_SPEED_1544KHZ	((   43 << 22) | ( 66 << 12) |  385)
#define CLK46X_SPEED_2048KHZ	((   32 << 22) | (280 << 12) |  511)
#define CLK46X_SPEED_4096KHZ	((   16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ	((    8 << 22) | (280 << 12) | 2047)

/*
 * HSS_CONFIG_CLOCK_CR register consists of 3 parts:
 * A (10 bits), B (10 bits) and C (12 bits).
 * IXP42x HSS clock generator operation (verified with an oscilloscope):
 * Each clock bit takes 7.5 ns (1 / 133.xx MHz).
 * The clock sequence consists of (C - B) states of 0s and 1s, each state is
 * A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
 * (A + 1) bits wide.
 *
 * The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
 * freq = 66.666 MHz / (A + (B + 1) / (C + 1))
 * minimum freq = 66.666 MHz / (A + 1)
 * maximum freq = 66.666 MHz / A
 *
 * Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
 * freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
 * The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples).
 * The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits
 * = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats).
 * The sequence consists of 4 complete clock periods, thus the average
 * frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s).
 * (max specified clock rate for IXP42x HSS is 8.192 Mb/s).
 */
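
/*
 * Quick sanity check of the formula against the CLK42X table above
 * (my arithmetic, not from the datasheet): CLK42X_SPEED_2048KHZ has
 * A = 32, B = 34, C = 63, giving 66.666 MHz / (32 + 35 / 64)
 * = ~2.048 MHz, as the name promises.
 */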

/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED	0
#define TDMMAP_HDLC		1	/* HDLC - packetized */
#define TDMMAP_VOICE56K		2	/* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K		3	/* Voice64K - 8-bit channelized */

/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR	0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR	0x04
#define HSS_CONFIG_CORE_CR	0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR	0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR	0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR	0x14
#define HSS_CONFIG_TX_LUT	0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT	0x38


/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE		0x40

/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD		0x41

/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ			0x42

/* triggers the NPE to reset internal status and enable the HssPacketized
   operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE		0x50
#define PKT_PIPE_FLOW_DISABLE		0x51
#define PKT_NUM_PIPES_WRITE		0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE	0x53
#define PKT_PIPE_HDLC_CFG_WRITE		0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE	0x55
#define PKT_PIPE_RX_SIZE_WRITE		0x56
#define PKT_PIPE_MODE_WRITE		0x57

/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN		2 /* HDLC alignment error */
#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving
				     this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG	5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT		6 /* abort sequence received */
#define ERR_DISCONNECTING	7 /* disconnect is in progress */


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct port {
	struct device *dev;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct hss_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	unsigned int id;
	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;
	u8 hdlc_cfg;
	u32 clock_reg;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, unused, hss_port, index;
	union {
		struct { u8 data8a, data8b, data8c, data8d; };
		struct { u16 data16a, data16b; };
		struct { u32 data32; };
	};
#else
	u8 index, hss_port, unused, cmd;
	union {
		struct { u8 data8d, data8c, data8b, data8a; };
		struct { u16 data16b, data16a; };
		struct { u32 data32; };
	};
#endif
};
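
/*
 * The field order above is reversed between the two builds so that the
 * byte and halfword members land in the same byte lanes of the two
 * 32-bit words handed to the NPE (which, AFAICT, reads them as
 * big-endian); only data32 needs no adjustment.
 */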

/* HDLC packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 status;
	u8 error_count;
	u16 __reserved;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 __reserved;
	u8 error_count;
	u8 status;
#endif
	u32 __reserved1[4];
};
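
/*
 * Same idea as struct msg: sub-word fields are swapped on little-endian
 * builds so the in-memory layout seen by the NPE is identical on both.
 */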

#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

/*****************************************************************************
 * global variables
 ****************************************************************************/

static int ports_open;
static struct dma_pool *dma_pool;
static spinlock_t npe_lock;

static const struct {
	int tx, txdone, rx, rxfree;
} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
		   HSS0_PKT_RXFREE0_QUEUE},
		  {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
		   HSS1_PKT_RXFREE0_QUEUE},
};

/*****************************************************************************
 * utility functions
 ****************************************************************************/

static inline struct port* dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif
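
/*
 * Used on little-endian CPUs to convert buffers to/from the big-endian
 * 32-bit word format the NPE appears to expect (see the RX and TX
 * paths); big-endian builds pass buffers through untouched.
 */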

/*****************************************************************************
 * HSS access
 ****************************************************************************/

static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
{
	u32 *val = (u32*)msg;
	if (npe_send_message(port->npe, msg, what)) {
		printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
		       " to %s\n", port->id, val[0], val[1],
		       npe_name(port->npe));
		BUG();
	}
}

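/*
 * The LUT holds one 2-bit TDMMAP_* entry per channel, 16 channels per
 * 32-bit word: each new entry is shifted in from the top and a completed
 * word is flushed to the TX LUT (and its RX mirror) after every 16th
 * channel.
 */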
static void hss_config_set_lut(struct port *port)
{
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		msg.data32 >>= 2;
		msg.data32 |= TDMMAP_HDLC << 30;

		if (ch % 16 == 15) {
			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
		}
	}
}

static void hss_config(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");

	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = port->clock_reg;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
		       port->id);
		BUG();
	}

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
}

static void hss_set_hdlc_cfg(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}

static u32 hss_get_status(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_ERROR_READ;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
		       port->id);
		BUG();
	}

	return msg.data32;
}

static void hss_start_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.hss_port = port->id;
	msg.data32 = 0;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}

static void hss_stop_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
}

static int hss_load_firmware(struct port *port)
{
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))
		return err;

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}

/*****************************************************************************
 * packetized (HDLC) operation
 ****************************************************************************/

static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	BUG_ON(phys & 0x1F);
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}


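/*
 * Note: on little-endian CPUs the TX path (hss_hdlc_xmit) maps a
 * word-aligned bounce buffer rather than the skb itself, so the unmap
 * must reconstruct the original base address and padded length from
 * desc->data and desc->buf_len.
 */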
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}


static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
	struct net_device *netdev = pdev;
	struct port *port = dev_to_port(netdev);
	unsigned long flags;

	spin_lock_irqsave(&npe_lock, flags);
	port->carrier = carrier;
	if (!port->loopback) {
		if (carrier)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
	}
	spin_unlock_irqrestore(&npe_lock, flags);
}

static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
}

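/*
 * NAPI poll. The empty-queue exit below follows the usual pattern for
 * closing the race with the RX interrupt: complete NAPI first, re-enable
 * the queue IRQ, then re-check the queue and reschedule if an entry
 * arrived between the last dequeue and the IRQ enable.
 */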
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received; /* not all work done */
}


static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}

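/*
 * TX. On big-endian CPUs the skb is mapped and queued directly; on
 * little-endian CPUs the data is byte-swapped into a freshly allocated
 * word-aligned bounce buffer and the skb freed immediately (see
 * memcpy_swab32() above).
 */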
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	wmb();
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}


static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}

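/*
 * All RX and TX descriptors live in a single coherent dma_pool block,
 * created at the first open and destroyed at the last close. The 32-byte
 * alignment matches the BUG_ON(phys & 0x1F) checks in queue_get_desc()
 * and queue_put_desc().
 */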
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}

static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}


static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct port *port = dev_to_port(dev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch(parity) {
	case PARITY_CRC16_PR1_CCITT:
		port->hdlc_cfg = 0;
		return 0;

	case PARITY_CRC32_PR1_CCITT:
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return 0;

	default:
		return -EINVAL;
	}
}

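/*
 * Internal clock helpers. With A in the top 10 bits of CLOCK_CR, B in
 * the middle 10 and C in the low 12, the formula from the big comment
 * above, freq = 66.666 MHz / (A + (B + 1) / (C + 1)), rearranges to
 * freq = ixp4xx_timer_freq * (C + 1) / (A * (C + 1) + B + 1), which is
 * what check_clock() computes with integer math.
 */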
static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
		       u32 *best, u32 *best_diff, u32 *reg)
{
	/* a is 10-bit, b is 10-bit, c is 12-bit */
	u64 new_rate;
	u32 new_diff;

	new_rate = ixp4xx_timer_freq * (u64)(c + 1);
	do_div(new_rate, a * (c + 1) + b + 1);
	new_diff = abs((u32)new_rate - rate);

	if (new_diff < *best_diff) {
		*best = new_rate;
		*best_diff = new_diff;
		*reg = (a << 22) | (b << 12) | c;
	}
	return new_diff;
}

static void find_best_clock(u32 rate, u32 *best, u32 *reg)
{
	u32 a, b, diff = 0xFFFFFFFF;

	a = ixp4xx_timer_freq / rate;

	if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
		check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
		return;
	}
	if (a == 0) { /* > 66.666 MHz */
		a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
		rate = ixp4xx_timer_freq;
	}

	if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
		check_clock(rate, a - 1, 1, 1, best, &diff, reg);
		return;
	}

	for (b = 0; b < 0x400; b++) {
		u64 c = (b + 1) * (u64)rate;
		do_div(c, ixp4xx_timer_freq - rate * a);
		c--;
		if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
			if (b == 0 && /* also try a bit higher rate */
			    !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
				return;
			check_clock(rate, a, b, 0xFFF, best, &diff, reg);
			return;
		}
		if (!check_clock(rate, a, b, c, best, &diff, reg))
			return;
		if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
			return;
	}
}

static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = port->clock_rate;
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		if (clk == CLOCK_INT)
			find_best_clock(new_line.clock_rate, &port->clock_rate,
					&port->clock_reg);
		else {
			port->clock_rate = 0;
			port->clock_reg = CLK42X_SPEED_2048KHZ;
		}
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		if (dev->flags & IFF_UP)
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/*****************************************************************************
 * initialization
 ****************************************************************************/

static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open       = hss_hdlc_open,
	.ndo_stop       = hss_hdlc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hss_hdlc_ioctl,
};

static int __devinit hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENODEV;
		goto err_free;
	}

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 0;
	port->clock_reg = CLK42X_SPEED_2048KHZ;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}

static int __devexit hss_remove_one(struct platform_device *pdev)
{
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	platform_set_drvdata(pdev, NULL);
	kfree(port);
	return 0;
}

static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,
};

static int __init hss_init_module(void)
{
	if ((ixp4xx_read_feature_bits() &
	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
		return -ENODEV;

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
}

static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);