Merge branch 'for-next' of git://github.com/rydberg/linux into next
[deliverable/linux.git] / drivers / net / ethernet / micrel / ks8851_mll.c
1 /**
2 * drivers/net/ethernet/micrel/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19 /* Supports:
20 * KS8851 16bit MLL chip from Micrel Inc.
21 */
22
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/cache.h>
32 #include <linux/crc32.h>
33 #include <linux/mii.h>
34 #include <linux/platform_device.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #include <linux/ks8851_mll.h>
38
39 #define DRV_NAME "ks8851_mll"
40
41 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
42 #define MAX_RECV_FRAMES 255
43 #define MAX_BUF_SIZE 2048
44 #define TX_BUF_SIZE 2000
45 #define RX_BUF_SIZE 2000
46
47 #define KS_CCR 0x08
48 #define CCR_EEPROM (1 << 9)
49 #define CCR_SPI (1 << 8)
50 #define CCR_8BIT (1 << 7)
51 #define CCR_16BIT (1 << 6)
52 #define CCR_32BIT (1 << 5)
53 #define CCR_SHARED (1 << 4)
54 #define CCR_32PIN (1 << 0)
55
56 /* MAC address registers */
57 #define KS_MARL 0x10
58 #define KS_MARM 0x12
59 #define KS_MARH 0x14
60
61 #define KS_OBCR 0x20
62 #define OBCR_ODS_16MA (1 << 6)
63
64 #define KS_EEPCR 0x22
65 #define EEPCR_EESA (1 << 4)
66 #define EEPCR_EESB (1 << 3)
67 #define EEPCR_EEDO (1 << 2)
68 #define EEPCR_EESCK (1 << 1)
69 #define EEPCR_EECS (1 << 0)
70
71 #define KS_MBIR 0x24
72 #define MBIR_TXMBF (1 << 12)
73 #define MBIR_TXMBFA (1 << 11)
74 #define MBIR_RXMBF (1 << 4)
75 #define MBIR_RXMBFA (1 << 3)
76
77 #define KS_GRR 0x26
78 #define GRR_QMU (1 << 1)
79 #define GRR_GSR (1 << 0)
80
81 #define KS_WFCR 0x2A
82 #define WFCR_MPRXE (1 << 7)
83 #define WFCR_WF3E (1 << 3)
84 #define WFCR_WF2E (1 << 2)
85 #define WFCR_WF1E (1 << 1)
86 #define WFCR_WF0E (1 << 0)
87
88 #define KS_WF0CRC0 0x30
89 #define KS_WF0CRC1 0x32
90 #define KS_WF0BM0 0x34
91 #define KS_WF0BM1 0x36
92 #define KS_WF0BM2 0x38
93 #define KS_WF0BM3 0x3A
94
95 #define KS_WF1CRC0 0x40
96 #define KS_WF1CRC1 0x42
97 #define KS_WF1BM0 0x44
98 #define KS_WF1BM1 0x46
99 #define KS_WF1BM2 0x48
100 #define KS_WF1BM3 0x4A
101
102 #define KS_WF2CRC0 0x50
103 #define KS_WF2CRC1 0x52
104 #define KS_WF2BM0 0x54
105 #define KS_WF2BM1 0x56
106 #define KS_WF2BM2 0x58
107 #define KS_WF2BM3 0x5A
108
109 #define KS_WF3CRC0 0x60
110 #define KS_WF3CRC1 0x62
111 #define KS_WF3BM0 0x64
112 #define KS_WF3BM1 0x66
113 #define KS_WF3BM2 0x68
114 #define KS_WF3BM3 0x6A
115
116 #define KS_TXCR 0x70
117 #define TXCR_TCGICMP (1 << 8)
118 #define TXCR_TCGUDP (1 << 7)
119 #define TXCR_TCGTCP (1 << 6)
120 #define TXCR_TCGIP (1 << 5)
121 #define TXCR_FTXQ (1 << 4)
122 #define TXCR_TXFCE (1 << 3)
123 #define TXCR_TXPE (1 << 2)
124 #define TXCR_TXCRC (1 << 1)
125 #define TXCR_TXE (1 << 0)
126
127 #define KS_TXSR 0x72
128 #define TXSR_TXLC (1 << 13)
129 #define TXSR_TXMC (1 << 12)
130 #define TXSR_TXFID_MASK (0x3f << 0)
131 #define TXSR_TXFID_SHIFT (0)
132 #define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
133
134
135 #define KS_RXCR1 0x74
136 #define RXCR1_FRXQ (1 << 15)
137 #define RXCR1_RXUDPFCC (1 << 14)
138 #define RXCR1_RXTCPFCC (1 << 13)
139 #define RXCR1_RXIPFCC (1 << 12)
140 #define RXCR1_RXPAFMA (1 << 11)
141 #define RXCR1_RXFCE (1 << 10)
142 #define RXCR1_RXEFE (1 << 9)
143 #define RXCR1_RXMAFMA (1 << 8)
144 #define RXCR1_RXBE (1 << 7)
145 #define RXCR1_RXME (1 << 6)
146 #define RXCR1_RXUE (1 << 5)
147 #define RXCR1_RXAE (1 << 4)
148 #define RXCR1_RXINVF (1 << 1)
149 #define RXCR1_RXE (1 << 0)
150 #define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
151 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
152
153 #define KS_RXCR2 0x76
154 #define RXCR2_SRDBL_MASK (0x7 << 5)
155 #define RXCR2_SRDBL_SHIFT (5)
156 #define RXCR2_SRDBL_4B (0x0 << 5)
157 #define RXCR2_SRDBL_8B (0x1 << 5)
158 #define RXCR2_SRDBL_16B (0x2 << 5)
159 #define RXCR2_SRDBL_32B (0x3 << 5)
160 /* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
161 #define RXCR2_IUFFP (1 << 4)
162 #define RXCR2_RXIUFCEZ (1 << 3)
163 #define RXCR2_UDPLFE (1 << 2)
164 #define RXCR2_RXICMPFCC (1 << 1)
165 #define RXCR2_RXSAF (1 << 0)
166
167 #define KS_TXMIR 0x78
168
169 #define KS_RXFHSR 0x7C
170 #define RXFSHR_RXFV (1 << 15)
171 #define RXFSHR_RXICMPFCS (1 << 13)
172 #define RXFSHR_RXIPFCS (1 << 12)
173 #define RXFSHR_RXTCPFCS (1 << 11)
174 #define RXFSHR_RXUDPFCS (1 << 10)
175 #define RXFSHR_RXBF (1 << 7)
176 #define RXFSHR_RXMF (1 << 6)
177 #define RXFSHR_RXUF (1 << 5)
178 #define RXFSHR_RXMR (1 << 4)
179 #define RXFSHR_RXFT (1 << 3)
180 #define RXFSHR_RXFTL (1 << 2)
181 #define RXFSHR_RXRF (1 << 1)
182 #define RXFSHR_RXCE (1 << 0)
183 #define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
184 RXFSHR_RXFTL | RXFSHR_RXMR |\
185 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
186 RXFSHR_RXTCPFCS)
187 #define KS_RXFHBCR 0x7E
188 #define RXFHBCR_CNT_MASK 0x0FFF
189
190 #define KS_TXQCR 0x80
191 #define TXQCR_AETFE (1 << 2)
192 #define TXQCR_TXQMAM (1 << 1)
193 #define TXQCR_METFE (1 << 0)
194
195 #define KS_RXQCR 0x82
196 #define RXQCR_RXDTTS (1 << 12)
197 #define RXQCR_RXDBCTS (1 << 11)
198 #define RXQCR_RXFCTS (1 << 10)
199 #define RXQCR_RXIPHTOE (1 << 9)
200 #define RXQCR_RXDTTE (1 << 7)
201 #define RXQCR_RXDBCTE (1 << 6)
202 #define RXQCR_RXFCTE (1 << 5)
203 #define RXQCR_ADRFE (1 << 4)
204 #define RXQCR_SDA (1 << 3)
205 #define RXQCR_RRXEF (1 << 0)
206 #define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
207
208 #define KS_TXFDPR 0x84
209 #define TXFDPR_TXFPAI (1 << 14)
210 #define TXFDPR_TXFP_MASK (0x7ff << 0)
211 #define TXFDPR_TXFP_SHIFT (0)
212
213 #define KS_RXFDPR 0x86
214 #define RXFDPR_RXFPAI (1 << 14)
215
216 #define KS_RXDTTR 0x8C
217 #define KS_RXDBCTR 0x8E
218
219 #define KS_IER 0x90
220 #define KS_ISR 0x92
221 #define IRQ_LCI (1 << 15)
222 #define IRQ_TXI (1 << 14)
223 #define IRQ_RXI (1 << 13)
224 #define IRQ_RXOI (1 << 11)
225 #define IRQ_TXPSI (1 << 9)
226 #define IRQ_RXPSI (1 << 8)
227 #define IRQ_TXSAI (1 << 6)
228 #define IRQ_RXWFDI (1 << 5)
229 #define IRQ_RXMPDI (1 << 4)
230 #define IRQ_LDI (1 << 3)
231 #define IRQ_EDI (1 << 2)
232 #define IRQ_SPIBEI (1 << 1)
233 #define IRQ_DEDI (1 << 0)
234
235 #define KS_RXFCTR 0x9C
236 #define RXFCTR_THRESHOLD_MASK 0x00FF
237
238 #define KS_RXFC 0x9D
239 #define RXFCTR_RXFC_MASK (0xff << 8)
240 #define RXFCTR_RXFC_SHIFT (8)
241 #define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
242 #define RXFCTR_RXFCT_MASK (0xff << 0)
243 #define RXFCTR_RXFCT_SHIFT (0)
244
245 #define KS_TXNTFSR 0x9E
246
247 #define KS_MAHTR0 0xA0
248 #define KS_MAHTR1 0xA2
249 #define KS_MAHTR2 0xA4
250 #define KS_MAHTR3 0xA6
251
252 #define KS_FCLWR 0xB0
253 #define KS_FCHWR 0xB2
254 #define KS_FCOWR 0xB4
255
256 #define KS_CIDER 0xC0
257 #define CIDER_ID 0x8870
258 #define CIDER_REV_MASK (0x7 << 1)
259 #define CIDER_REV_SHIFT (1)
260 #define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
261
262 #define KS_CGCR 0xC6
263 #define KS_IACR 0xC8
264 #define IACR_RDEN (1 << 12)
265 #define IACR_TSEL_MASK (0x3 << 10)
266 #define IACR_TSEL_SHIFT (10)
267 #define IACR_TSEL_MIB (0x3 << 10)
268 #define IACR_ADDR_MASK (0x1f << 0)
269 #define IACR_ADDR_SHIFT (0)
270
271 #define KS_IADLR 0xD0
272 #define KS_IAHDR 0xD2
273
274 #define KS_PMECR 0xD4
275 #define PMECR_PME_DELAY (1 << 14)
276 #define PMECR_PME_POL (1 << 12)
277 #define PMECR_WOL_WAKEUP (1 << 11)
278 #define PMECR_WOL_MAGICPKT (1 << 10)
279 #define PMECR_WOL_LINKUP (1 << 9)
280 #define PMECR_WOL_ENERGY (1 << 8)
281 #define PMECR_AUTO_WAKE_EN (1 << 7)
282 #define PMECR_WAKEUP_NORMAL (1 << 6)
283 #define PMECR_WKEVT_MASK (0xf << 2)
284 #define PMECR_WKEVT_SHIFT (2)
285 #define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
286 #define PMECR_WKEVT_ENERGY (0x1 << 2)
287 #define PMECR_WKEVT_LINK (0x2 << 2)
288 #define PMECR_WKEVT_MAGICPKT (0x4 << 2)
289 #define PMECR_WKEVT_FRAME (0x8 << 2)
290 #define PMECR_PM_MASK (0x3 << 0)
291 #define PMECR_PM_SHIFT (0)
292 #define PMECR_PM_NORMAL (0x0 << 0)
293 #define PMECR_PM_ENERGY (0x1 << 0)
294 #define PMECR_PM_SOFTDOWN (0x2 << 0)
295 #define PMECR_PM_POWERSAVE (0x3 << 0)
296
297 /* Standard MII PHY data */
298 #define KS_P1MBCR 0xE4
299 #define P1MBCR_FORCE_FDX (1 << 8)
300
301 #define KS_P1MBSR 0xE6
302 #define P1MBSR_AN_COMPLETE (1 << 5)
303 #define P1MBSR_AN_CAPABLE (1 << 3)
304 #define P1MBSR_LINK_UP (1 << 2)
305
306 #define KS_PHY1ILR 0xE8
307 #define KS_PHY1IHR 0xEA
308 #define KS_P1ANAR 0xEC
309 #define KS_P1ANLPR 0xEE
310
311 #define KS_P1SCLMD 0xF4
312 #define P1SCLMD_LEDOFF (1 << 15)
313 #define P1SCLMD_TXIDS (1 << 14)
314 #define P1SCLMD_RESTARTAN (1 << 13)
315 #define P1SCLMD_DISAUTOMDIX (1 << 10)
316 #define P1SCLMD_FORCEMDIX (1 << 9)
317 #define P1SCLMD_AUTONEGEN (1 << 7)
318 #define P1SCLMD_FORCE100 (1 << 6)
319 #define P1SCLMD_FORCEFDX (1 << 5)
320 #define P1SCLMD_ADV_FLOW (1 << 4)
321 #define P1SCLMD_ADV_100BT_FDX (1 << 3)
322 #define P1SCLMD_ADV_100BT_HDX (1 << 2)
323 #define P1SCLMD_ADV_10BT_FDX (1 << 1)
324 #define P1SCLMD_ADV_10BT_HDX (1 << 0)
325
326 #define KS_P1CR 0xF6
327 #define P1CR_HP_MDIX (1 << 15)
328 #define P1CR_REV_POL (1 << 13)
329 #define P1CR_OP_100M (1 << 10)
330 #define P1CR_OP_FDX (1 << 9)
331 #define P1CR_OP_MDI (1 << 7)
332 #define P1CR_AN_DONE (1 << 6)
333 #define P1CR_LINK_GOOD (1 << 5)
334 #define P1CR_PNTR_FLOW (1 << 4)
335 #define P1CR_PNTR_100BT_FDX (1 << 3)
336 #define P1CR_PNTR_100BT_HDX (1 << 2)
337 #define P1CR_PNTR_10BT_FDX (1 << 1)
338 #define P1CR_PNTR_10BT_HDX (1 << 0)
339
340 /* TX Frame control */
341
342 #define TXFR_TXIC (1 << 15)
343 #define TXFR_TXFID_MASK (0x3f << 0)
344 #define TXFR_TXFID_SHIFT (0)
345
346 #define KS_P1SR 0xF8
347 #define P1SR_HP_MDIX (1 << 15)
348 #define P1SR_REV_POL (1 << 13)
349 #define P1SR_OP_100M (1 << 10)
350 #define P1SR_OP_FDX (1 << 9)
351 #define P1SR_OP_MDI (1 << 7)
352 #define P1SR_AN_DONE (1 << 6)
353 #define P1SR_LINK_GOOD (1 << 5)
354 #define P1SR_PNTR_FLOW (1 << 4)
355 #define P1SR_PNTR_100BT_FDX (1 << 3)
356 #define P1SR_PNTR_100BT_HDX (1 << 2)
357 #define P1SR_PNTR_10BT_FDX (1 << 1)
358 #define P1SR_PNTR_10BT_HDX (1 << 0)
359
360 #define ENUM_BUS_NONE 0
361 #define ENUM_BUS_8BIT 1
362 #define ENUM_BUS_16BIT 2
363 #define ENUM_BUS_32BIT 3
364
365 #define MAX_MCAST_LST 32
366 #define HW_MCAST_SIZE 8
367
/**
 * union ks_tx_hdr - tx header data
 * @txb: The header as bytes
 * @txw: The header as 16bit, little-endian words
 *
 * A dual representation of the tx header data to allow
 * access to individual bytes, and to allow 16bit accesses
 * with 16bit alignment.  txw[0] carries the TX control word (written
 * as zero) and txw[1] the frame byte count - see ks_write_qmu().
 */
union ks_tx_hdr {
	u8	txb[4];
	__le16	txw[2];
};
381
382 /**
383 * struct ks_net - KS8851 driver private data
384 * @net_device : The network device we're bound to
385 * @hw_addr : start address of data register.
386 * @hw_addr_cmd : start address of command register.
 * @txh    : temporary buffer to save status/length.
388 * @lock : Lock to ensure that the device is not accessed when busy.
389 * @pdev : Pointer to platform device.
390 * @mii : The MII state information for the mii calls.
391 * @frame_head_info : frame header information for multi-pkt rx.
392 * @statelock : Lock on this structure for tx list.
393 * @msg_enable : The message flags controlling driver output (see ethtool).
394 * @frame_cnt : number of frames received.
395 * @bus_width : i/o bus width.
396 * @rc_rxqcr : Cached copy of KS_RXQCR.
397 * @rc_txcr : Cached copy of KS_TXCR.
398 * @rc_ier : Cached copy of KS_IER.
 * @sharedbus  : Multiplexed (addr and data bus) mode indicator.
400 * @cmd_reg_cache : command register cached.
401 * @cmd_reg_cache_int : command register cached. Used in the irq handler.
402 * @promiscuous : promiscuous mode indicator.
 * @all_mcast  : multicast indicator.
404 * @mcast_lst_size : size of multicast list.
405 * @mcast_lst : multicast list.
 * @mcast_bits : multicast enabled.
407 * @mac_addr : MAC address assigned to this device.
408 * @fid : frame id.
409 * @extra_byte : number of extra byte prepended rx pkt.
410 * @enabled : indicator this device works.
411 *
412 * The @lock ensures that the chip is protected when certain operations are
413 * in progress. When the read or write packet transfer is in progress, most
414 * of the chip registers are not accessible until the transfer is finished and
415 * the DMA has been de-asserted.
416 *
417 * The @statelock is used to protect information in the structure which may
418 * need to be accessed via several sources, such as the network driver layer
419 * or one of the work queues.
420 *
421 */
422
/* Receive multiplex framer header info - one entry per pending RX frame,
 * filled from the chip before any frame data is pulled (see ks_rcv()).
 */
struct type_frame_head {
	u16	sts;         /* Frame status (read from KS_RXFHSR) */
	u16	len;         /* Byte count (read from KS_RXFHBCR) */
};
428
/* Driver private state; the fields are documented by the kernel-doc
 * block above.  Field order/layout must not change: txh is cacheline
 * aligned for the pseudo-DMA block writes.
 */
struct ks_net {
	struct net_device	*netdev;
	void __iomem		*hw_addr;
	void __iomem		*hw_addr_cmd;
	union ks_tx_hdr		txh ____cacheline_aligned;
	struct mutex		lock; /* serialises chip access (see ks_net_stop()) */
	struct platform_device *pdev;
	struct mii_if_info	mii;
	struct type_frame_head	*frame_head_info;
	spinlock_t		statelock;
	u32			msg_enable;
	u32			frame_cnt;
	int			bus_width;

	/* software copies of write-only / unreadable chip registers */
	u16			rc_rxqcr;
	u16			rc_txcr;
	u16			rc_ier;
	u16			sharedbus;
	u16			cmd_reg_cache;
	u16			cmd_reg_cache_int;
	u16			promiscuous;
	u16			all_mcast;
	u16			mcast_lst_size;
	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
	u8			mcast_bits[HW_MCAST_SIZE];
	u8			mac_addr[6];
	u8			fid;
	u8			extra_byte;
	u8			enabled;
};
459
460 static int msg_enable;
461
462 #define BE3 0x8000 /* Byte Enable 3 */
463 #define BE2 0x4000 /* Byte Enable 2 */
464 #define BE1 0x2000 /* Byte Enable 1 */
465 #define BE0 0x1000 /* Byte Enable 0 */
466
467 /* register read/write calls.
468 *
469 * All these calls issue transactions to access the chip's registers. They
470 * all require that the necessary lock is held to prevent accesses when the
471 * chip is busy transferring packet data (RX/TX FIFO accesses).
472 */
473
/**
 * ks_rdreg8 - read 8 bit register from device
 * @ks	  : The chip information
 * @offset: The register address
 *
 * Read a 8bit register from the chip, returning the result
 */
static u8 ks_rdreg8(struct ks_net *ks, int offset)
{
	u16 data;
	/* byte lane within the register window: selects which BEx bit */
	u8 shift_bit = offset & 0x03;
	/* odd addresses live in the high byte of the 16-bit data word */
	u8 shift_data = (offset & 1) << 3;
	/* cache the command word - the chip cannot read it back
	 * (see ks_save_cmd_reg())
	 */
	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	data = ioread16(ks->hw_addr);
	return (u8)(data >> shift_data);
}
491
/**
 * ks_rdreg16 - read 16 bit register from device
 * @ks	  : The chip information
 * @offset: The register address
 *
 * Read a 16bit register from the chip, returning the result
 */

static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
	/* BE1|BE0 select the low 16-bit lane; shifting by (offset & 2)
	 * moves them to BE3|BE2 for the upper lane.  The command word is
	 * cached because the chip cannot read it back.
	 */
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	return ioread16(ks->hw_addr);
}
506
/**
 * ks_wrreg8 - write 8bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 */
static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
{
	u8 shift_bit = (offset & 0x03);
	/* odd addresses go in the upper byte of the 16-bit data word */
	u16 value_write = (u16)(value << ((offset & 1) << 3));
	/* cache the command word - the chip cannot read it back */
	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value_write, ks->hw_addr);
}
522
/**
 * ks_wrreg16 - write 16bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 */

static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
	/* same lane selection as ks_rdreg16(); command word cached because
	 * the chip cannot read it back
	 */
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value, ks->hw_addr);
}
537
538 /**
539 * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode enabled.
540 * @ks: The chip state
541 * @wptr: buffer address to save data
542 * @len: length in byte to read
543 *
544 */
545 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
546 {
547 len >>= 1;
548 while (len--)
549 *wptr++ = (u16)ioread16(ks->hw_addr);
550 }
551
552 /**
553 * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled.
554 * @ks: The chip information
555 * @wptr: buffer address
556 * @len: length in byte to write
557 *
558 */
559 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
560 {
561 len >>= 1;
562 while (len--)
563 iowrite16(*wptr++, ks->hw_addr);
564 }
565
/**
 * ks_disable_int - mask all chip interrupt sources
 * @ks: The chip information
 */
static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}  /* ks_disable_int */
570
/**
 * ks_enable_int - restore the cached interrupt enable mask (rc_ier)
 * @ks: The chip information
 */
static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}  /* ks_enable_int */
575
/**
 * ks_tx_fifo_space - return the available hardware buffer size.
 * @ks: The chip information
 *
 * Reads TXMIR; the low 13 bits hold the free TX memory in bytes.
 */
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}
585
/**
 * ks_save_cmd_reg - save the command register from the cache.
 * @ks: The chip information
 *
 * Called first thing in the IRQ handler so that whatever command the
 * interrupted code had selected can be restored on exit.
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	/*ks8851 MLL has a bug to read back the command register.
	 * So rely on software to save the content of command register.
	 */
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}
598
/**
 * ks_restore_cmd_reg - restore the command register from the cache and
 *	write to hardware register.
 * @ks: The chip information
 *
 * Counterpart of ks_save_cmd_reg(); must be the last chip access in the
 * IRQ handler.
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}
610
611 /**
612 * ks_set_powermode - set power mode of the device
613 * @ks: The chip information
614 * @pwrmode: The power mode value to write to KS_PMECR.
615 *
616 * Change the power mode of the chip.
617 */
618 static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
619 {
620 unsigned pmecr;
621
622 netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
623
624 ks_rdreg16(ks, KS_GRR);
625 pmecr = ks_rdreg16(ks, KS_PMECR);
626 pmecr &= ~PMECR_PM_MASK;
627 pmecr |= pwrmode;
628
629 ks_wrreg16(ks, KS_PMECR, pmecr);
630 }
631
632 /**
633 * ks_read_config - read chip configuration of bus width.
634 * @ks: The chip information
635 *
636 */
637 static void ks_read_config(struct ks_net *ks)
638 {
639 u16 reg_data = 0;
640
641 /* Regardless of bus width, 8 bit read should always work.*/
642 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
643 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
644
645 /* addr/data bus are multiplexed */
646 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
647
648 /* There are garbage data when reading data from QMU,
649 depending on bus-width.
650 */
651
652 if (reg_data & CCR_8BIT) {
653 ks->bus_width = ENUM_BUS_8BIT;
654 ks->extra_byte = 1;
655 } else if (reg_data & CCR_16BIT) {
656 ks->bus_width = ENUM_BUS_16BIT;
657 ks->extra_byte = 2;
658 } else {
659 ks->bus_width = ENUM_BUS_32BIT;
660 ks->extra_byte = 4;
661 }
662 }
663
/**
 * ks_soft_reset - issue one of the soft reset to the device
 * @ks: The device state.
 * @op: The bit(s) to set in the GRR
 *
 * Issue the relevant soft-reset command to the device's GRR register
 * specified by @op.
 *
 * Note, the delays are in there as a caution to ensure that the reset
 * has time to take effect and then complete. Since the datasheet does
 * not currently specify the exact sequence, we have chosen something
 * that seems to work with our device.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* Disable interrupt first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	/* assert then deassert the requested reset bit(s) */
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}
686
687
688 void ks_enable_qmu(struct ks_net *ks)
689 {
690 u16 w;
691
692 w = ks_rdreg16(ks, KS_TXCR);
693 /* Enables QMU Transmit (TXCR). */
694 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
695
696 /*
697 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
698 * Enable
699 */
700
701 w = ks_rdreg16(ks, KS_RXQCR);
702 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
703
704 /* Enables QMU Receive (RXCR1). */
705 w = ks_rdreg16(ks, KS_RXCR1);
706 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
707 ks->enabled = true;
708 } /* ks_enable_qmu */
709
710 static void ks_disable_qmu(struct ks_net *ks)
711 {
712 u16 w;
713
714 w = ks_rdreg16(ks, KS_TXCR);
715
716 /* Disables QMU Transmit (TXCR). */
717 w &= ~TXCR_TXE;
718 ks_wrreg16(ks, KS_TXCR, w);
719
720 /* Disables QMU Receive (RXCR1). */
721 w = ks_rdreg16(ks, KS_RXCR1);
722 w &= ~RXCR1_RXE ;
723 ks_wrreg16(ks, KS_RXCR1, w);
724
725 ks->enabled = false;
726
727 } /* ks_disable_qmu */
728
/**
 * ks_read_qmu - read 1 pkt data from the QMU.
 * @ks: The chip information
 * @buf: buffer address to save 1 pkt
 * @len: Pkt length
 * Here is the sequence to read 1 pkt:
 * 1. set pseudo DMA mode
 * 2. read prepend data
 * 3. read pkt data
 * 4. reset pseudo DMA Mode
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	/* split the bus-width dependent dummy prepend (extra_byte) into an
	 * odd byte (r) and an even remainder (w) so reads stay 16-bit wide
	 */
	u32 r =  ks->extra_byte & 0x1 ;
	u32 w = ks->extra_byte - r;

	/* 1. set pseudo DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read prepend data */
	/**
	 * read 4 + extra bytes and discard them.
	 * extra bytes for dummy, 2 for status, 2 for len
	 */

	/* discard the odd dummy byte, if any, with a single 8 bit access */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read pkt data */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset pseudo DMA Mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
766
767 /**
768 * ks_rcv - read multiple pkts data from the QMU.
769 * @ks: The chip information
770 * @netdev: The network device being opened.
771 *
772 * Read all of header information before reading pkt content.
773 * It is not allowed only port of pkts in QMU after issuing
774 * interrupt ack.
775 */
776 static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
777 {
778 u32 i;
779 struct type_frame_head *frame_hdr = ks->frame_head_info;
780 struct sk_buff *skb;
781
782 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
783
784 /* read all header information */
785 for (i = 0; i < ks->frame_cnt; i++) {
786 /* Checking Received packet status */
787 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
788 /* Get packet len from hardware */
789 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
790 frame_hdr++;
791 }
792
793 frame_hdr = ks->frame_head_info;
794 while (ks->frame_cnt--) {
795 if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
796 frame_hdr->len >= RX_BUF_SIZE ||
797 frame_hdr->len <= 0)) {
798
799 /* discard an invalid packet */
800 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
801 netdev->stats.rx_dropped++;
802 if (!(frame_hdr->sts & RXFSHR_RXFV))
803 netdev->stats.rx_frame_errors++;
804 else
805 netdev->stats.rx_length_errors++;
806 frame_hdr++;
807 continue;
808 }
809
810 skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
811 if (likely(skb)) {
812 skb_reserve(skb, 2);
813 /* read data block including CRC 4 bytes */
814 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
815 skb_put(skb, frame_hdr->len - 4);
816 skb->protocol = eth_type_trans(skb, netdev);
817 netif_rx(skb);
818 /* exclude CRC size */
819 netdev->stats.rx_bytes += frame_hdr->len - 4;
820 netdev->stats.rx_packets++;
821 } else {
822 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
823 netdev->stats.rx_dropped++;
824 }
825 frame_hdr++;
826 }
827 }
828
829 /**
830 * ks_update_link_status - link status update.
831 * @netdev: The network device being opened.
832 * @ks: The chip information
833 *
834 */
835
836 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
837 {
838 /* check the status of the link */
839 u32 link_up_status;
840 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
841 netif_carrier_on(netdev);
842 link_up_status = true;
843 } else {
844 netif_carrier_off(netdev);
845 link_up_status = false;
846 }
847 netif_dbg(ks, link, ks->netdev,
848 "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
849 }
850
/**
 * ks_irq - device interrupt handler
 * @irq: Interrupt number passed from the IRQ handler.
 * @pw: The private word passed to register_irq(), our struct ks_net.
 *
 * This is the handler invoked to find out what happened
 *
 * Read the interrupt status, work out what needs to be done and then clear
 * any of the interrupts that are not needed.
 */

static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	u16 status;

	/* this should be the first in IRQ handler: the command register is
	 * not readable, so preserve the software copy before touching it
	 */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		/* not our interrupt (shared line) */
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	/* acknowledge every status bit we saw */
	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {

		/* clear the link-change wake event in PMECR */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	if (unlikely(status & IRQ_RXOI))
		ks->netdev->stats.rx_over_errors++;
	/* this should be the last in IRQ handler*/
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}
901
902
/**
 * ks_net_open - open network device
 * @netdev: The network device being opened.
 *
 * Called when the network device is marked active, such as a user executing
 * 'ifconfig up' on the device.
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define	KS_INT_FLAGS	(IRQF_DISABLED|IRQF_TRIGGER_LOW)
	/* lock the card, even if we may not actually do anything
	 * else at the moment.
	 */

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	/* attach the interrupt handler before enabling anything.
	 * NOTE(review): IRQF_DISABLED is deprecated/removed in later
	 * kernels - confirm against the target kernel version.
	 */
	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);

	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake up powermode to normal mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	/* ack any pending interrupts, then unmask and start the QMU */
	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}
943
/**
 * ks_net_stop - close network device
 * @netdev: The device being closed.
 *
 * Called to close down a network device which has been active. Cancel any
 * work, shutdown the RX and TX process and then place the chip into a low
 * power state whilst it is not being used.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	/* serialise against any other chip access while shutting down */
	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}
975
976
/**
 * ks_write_qmu - write 1 pkt data to the QMU.
 * @ks: The chip information
 * @pdata: buffer address to save 1 pkt
 * @len: Pkt length in byte
 * Here is the sequence to write 1 pkt:
 * 1. set pseudo DMA mode
 * 2. write status/length
 * 3. write pkt data
 * 4. reset pseudo DMA mode
 * 5. Enqueue the pkt into the TXQ
 * 6. Wait until pkt is out
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	/* 2. write status/length info */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write pkt data */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. reset pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. busy-wait until TXQCR_METFE is auto-cleared by the chip */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
1010
/**
 * ks_start_xmit - transmit packet
 * @skb		: The buffer to transmit
 * @netdev	: The device used to transmit the packet.
 *
 * Called by the network layer to transmit the @skb.
 * The board IRQ line and the chip interrupts are disabled for the
 * duration because tx and rx must be mutually exclusive: the IRQ
 * handler must not touch the QMU while a tx is in progress.
 */
static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);

	disable_irq(netdev->irq);
	ks_disable_int(ks);
	spin_lock(&ks->statelock);

	/* Extra space are required:
	 *  4 byte for alignment, 4 for status/length, 4 for CRC
	 */

	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		/* add tx statistics */
		netdev->stats.tx_bytes += skb->len;
		netdev->stats.tx_packets++;
		dev_kfree_skb(skb);
	} else
		/* NOTE(review): the queue is not stopped here, so the core
		 * may retry immediately and spin until FIFO space appears;
		 * consider netif_stop_queue() with wake on IRQ_TXI -
		 * confirm before changing.
		 */
		retv = NETDEV_TX_BUSY;
	spin_unlock(&ks->statelock);
	ks_enable_int(ks);
	enable_irq(netdev->irq);
	return retv;
}
1046
1047 /**
1048 * ks_start_rx - ready to serve pkts
1049 * @ks : The chip information
1050 *
1051 */
1052 static void ks_start_rx(struct ks_net *ks)
1053 {
1054 u16 cntl;
1055
1056 /* Enables QMU Receive (RXCR1). */
1057 cntl = ks_rdreg16(ks, KS_RXCR1);
1058 cntl |= RXCR1_RXE ;
1059 ks_wrreg16(ks, KS_RXCR1, cntl);
1060 } /* ks_start_rx */
1061
1062 /**
1063 * ks_stop_rx - stop to serve pkts
1064 * @ks : The chip information
1065 *
1066 */
1067 static void ks_stop_rx(struct ks_net *ks)
1068 {
1069 u16 cntl;
1070
1071 /* Disables QMU Receive (RXCR1). */
1072 cntl = ks_rdreg16(ks, KS_RXCR1);
1073 cntl &= ~RXCR1_RXE ;
1074 ks_wrreg16(ks, KS_RXCR1, cntl);
1075
1076 } /* ks_stop_rx */
1077
1078 static unsigned long const ethernet_polynomial = 0x04c11db7U;
1079
1080 static unsigned long ether_gen_crc(int length, u8 *data)
1081 {
1082 long crc = -1;
1083 while (--length >= 0) {
1084 u8 current_octet = *data++;
1085 int bit;
1086
1087 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1088 crc = (crc << 1) ^
1089 ((crc < 0) ^ (current_octet & 1) ?
1090 ethernet_polynomial : 0);
1091 }
1092 }
1093 return (unsigned long)crc;
1094 } /* ether_gen_crc */
1095
/**
 * ks_set_grpaddr - set multicast information
 * @ks : The chip information
 *
 * Builds the multicast hash table (HW_MCAST_SIZE bytes; presumably 8 for a
 * 64-bucket table — confirm against the header) from the cached multicast
 * list and writes it to the MAHTR registers, two bytes per 16-bit write.
 */

static void ks_set_grpaddr(struct ks_net *ks)
{
	u8 i;
	u32 index, position, value;

	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		/* Bucket = top 6 bits of the Ethernet CRC of the address;
		 * 'index' is the table byte, 'value' the bit within it.
		 */
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;
		value = 1 << (position & 7);
		ks->mcast_bits[index] |= (u8)value;
	}

	for (i = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			/* Pair bytes (i-1, i) into one 16-bit write at the
			 * even register offset, low byte first.
			 */
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				(ks->mcast_bits[i] << 8) |
				ks->mcast_bits[i - 1]);
		}
	}
} /* ks_set_grpaddr */
1123
1124 /**
1125 * ks_clear_mcast - clear multicast information
1126 *
1127 * @ks : The chip information
1128 * This routine removes all mcast addresses set in the hardware.
1129 */
1130
1131 static void ks_clear_mcast(struct ks_net *ks)
1132 {
1133 u16 i, mcast_size;
1134 for (i = 0; i < HW_MCAST_SIZE; i++)
1135 ks->mcast_bits[i] = 0;
1136
1137 mcast_size = HW_MCAST_SIZE >> 2;
1138 for (i = 0; i < mcast_size; i++)
1139 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1140 }
1141
1142 static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1143 {
1144 u16 cntl;
1145 ks->promiscuous = promiscuous_mode;
1146 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1147 cntl = ks_rdreg16(ks, KS_RXCR1);
1148
1149 cntl &= ~RXCR1_FILTER_MASK;
1150 if (promiscuous_mode)
1151 /* Enable Promiscuous mode */
1152 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1153 else
1154 /* Disable Promiscuous mode (default normal mode) */
1155 cntl |= RXCR1_RXPAFMA;
1156
1157 ks_wrreg16(ks, KS_RXCR1, cntl);
1158
1159 if (ks->enabled)
1160 ks_start_rx(ks);
1161
1162 } /* ks_set_promis */
1163
1164 static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1165 {
1166 u16 cntl;
1167
1168 ks->all_mcast = mcast;
1169 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1170 cntl = ks_rdreg16(ks, KS_RXCR1);
1171 cntl &= ~RXCR1_FILTER_MASK;
1172 if (mcast)
1173 /* Enable "Perfect with Multicast address passed mode" */
1174 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1175 else
1176 /**
1177 * Disable "Perfect with Multicast address passed
1178 * mode" (normal mode).
1179 */
1180 cntl |= RXCR1_RXPAFMA;
1181
1182 ks_wrreg16(ks, KS_RXCR1, cntl);
1183
1184 if (ks->enabled)
1185 ks_start_rx(ks);
1186 } /* ks_set_mcast */
1187
1188 static void ks_set_rx_mode(struct net_device *netdev)
1189 {
1190 struct ks_net *ks = netdev_priv(netdev);
1191 struct netdev_hw_addr *ha;
1192
1193 /* Turn on/off promiscuous mode. */
1194 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1195 ks_set_promis(ks,
1196 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1197 /* Turn on/off all mcast mode. */
1198 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1199 ks_set_mcast(ks,
1200 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1201 else
1202 ks_set_promis(ks, false);
1203
1204 if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
1205 if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1206 int i = 0;
1207
1208 netdev_for_each_mc_addr(ha, netdev) {
1209 if (i >= MAX_MCAST_LST)
1210 break;
1211 memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
1212 }
1213 ks->mcast_lst_size = (u8)i;
1214 ks_set_grpaddr(ks);
1215 } else {
1216 /**
1217 * List too big to support so
1218 * turn on all mcast mode.
1219 */
1220 ks->mcast_lst_size = MAX_MCAST_LST;
1221 ks_set_mcast(ks, true);
1222 }
1223 } else {
1224 ks->mcast_lst_size = 0;
1225 ks_clear_mcast(ks);
1226 }
1227 } /* ks_set_rx_mode */
1228
1229 static void ks_set_mac(struct ks_net *ks, u8 *data)
1230 {
1231 u16 *pw = (u16 *)data;
1232 u16 w, u;
1233
1234 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1235
1236 u = *pw++;
1237 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1238 ks_wrreg16(ks, KS_MARH, w);
1239
1240 u = *pw++;
1241 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1242 ks_wrreg16(ks, KS_MARM, w);
1243
1244 u = *pw;
1245 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1246 ks_wrreg16(ks, KS_MARL, w);
1247
1248 memcpy(ks->mac_addr, data, 6);
1249
1250 if (ks->enabled)
1251 ks_start_rx(ks);
1252 }
1253
1254 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1255 {
1256 struct ks_net *ks = netdev_priv(netdev);
1257 struct sockaddr *addr = paddr;
1258 u8 *da;
1259
1260 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1261
1262 da = (u8 *)netdev->dev_addr;
1263
1264 ks_set_mac(ks, da);
1265 return 0;
1266 }
1267
1268 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1269 {
1270 struct ks_net *ks = netdev_priv(netdev);
1271
1272 if (!netif_running(netdev))
1273 return -EINVAL;
1274
1275 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1276 }
1277
/* net_device callbacks; MTU changes and address validation use the
 * generic Ethernet helpers.
 */
static const struct net_device_ops ks_netdev_ops = {
	.ndo_open = ks_net_open,
	.ndo_stop = ks_net_stop,
	.ndo_do_ioctl = ks_net_ioctl,
	.ndo_start_xmit = ks_start_xmit,
	.ndo_set_mac_address = ks_set_mac_address,
	.ndo_set_rx_mode = ks_set_rx_mode,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};
1288
/* ethtool support */

/* Report driver name, version, and the parent (platform) device name. */
static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}
1299
1300 static u32 ks_get_msglevel(struct net_device *netdev)
1301 {
1302 struct ks_net *ks = netdev_priv(netdev);
1303 return ks->msg_enable;
1304 }
1305
1306 static void ks_set_msglevel(struct net_device *netdev, u32 to)
1307 {
1308 struct ks_net *ks = netdev_priv(netdev);
1309 ks->msg_enable = to;
1310 }
1311
1312 static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1313 {
1314 struct ks_net *ks = netdev_priv(netdev);
1315 return mii_ethtool_gset(&ks->mii, cmd);
1316 }
1317
1318 static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1319 {
1320 struct ks_net *ks = netdev_priv(netdev);
1321 return mii_ethtool_sset(&ks->mii, cmd);
1322 }
1323
1324 static u32 ks_get_link(struct net_device *netdev)
1325 {
1326 struct ks_net *ks = netdev_priv(netdev);
1327 return mii_link_ok(&ks->mii);
1328 }
1329
1330 static int ks_nway_reset(struct net_device *netdev)
1331 {
1332 struct ks_net *ks = netdev_priv(netdev);
1333 return mii_nway_restart(&ks->mii);
1334 }
1335
/* ethtool operations; link/settings calls are thin MII-layer wrappers. */
static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo = ks_get_drvinfo,
	.get_msglevel = ks_get_msglevel,
	.set_msglevel = ks_set_msglevel,
	.get_settings = ks_get_settings,
	.set_settings = ks_set_settings,
	.get_link = ks_get_link,
	.nway_reset = ks_nway_reset,
};
1345
1346 /* MII interface controls */
1347
1348 /**
1349 * ks_phy_reg - convert MII register into a KS8851 register
1350 * @reg: MII register number.
1351 *
1352 * Return the KS8851 register number for the corresponding MII PHY register
1353 * if possible. Return zero if the MII register has no direct mapping to the
1354 * KS8851 register set.
1355 */
1356 static int ks_phy_reg(int reg)
1357 {
1358 switch (reg) {
1359 case MII_BMCR:
1360 return KS_P1MBCR;
1361 case MII_BMSR:
1362 return KS_P1MBSR;
1363 case MII_PHYSID1:
1364 return KS_PHY1ILR;
1365 case MII_PHYSID2:
1366 return KS_PHY1IHR;
1367 case MII_ADVERTISE:
1368 return KS_P1ANAR;
1369 case MII_LPA:
1370 return KS_P1ANLPR;
1371 }
1372
1373 return 0x0;
1374 }
1375
1376 /**
1377 * ks_phy_read - MII interface PHY register read.
1378 * @netdev: The network device the PHY is on.
1379 * @phy_addr: Address of PHY (ignored as we only have one)
1380 * @reg: The register to read.
1381 *
1382 * This call reads data from the PHY register specified in @reg. Since the
1383 * device does not support all the MII registers, the non-existent values
1384 * are always returned as zero.
1385 *
1386 * We return zero for unsupported registers as the MII code does not check
1387 * the value returned for any error status, and simply returns it to the
1388 * caller. The mii-tool that the driver was tested with takes any -ve error
1389 * as real PHY capabilities, thus displaying incorrect data to the user.
1390 */
1391 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1392 {
1393 struct ks_net *ks = netdev_priv(netdev);
1394 int ksreg;
1395 int result;
1396
1397 ksreg = ks_phy_reg(reg);
1398 if (!ksreg)
1399 return 0x0; /* no error return allowed, so use zero */
1400
1401 mutex_lock(&ks->lock);
1402 result = ks_rdreg16(ks, ksreg);
1403 mutex_unlock(&ks->lock);
1404
1405 return result;
1406 }
1407
1408 static void ks_phy_write(struct net_device *netdev,
1409 int phy, int reg, int value)
1410 {
1411 struct ks_net *ks = netdev_priv(netdev);
1412 int ksreg;
1413
1414 ksreg = ks_phy_reg(reg);
1415 if (ksreg) {
1416 mutex_lock(&ks->lock);
1417 ks_wrreg16(ks, ksreg, value);
1418 mutex_unlock(&ks->lock);
1419 }
1420 }
1421
1422 /**
1423 * ks_read_selftest - read the selftest memory info.
1424 * @ks: The device state
1425 *
1426 * Read and check the TX/RX memory selftest information.
1427 */
1428 static int ks_read_selftest(struct ks_net *ks)
1429 {
1430 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1431 int ret = 0;
1432 unsigned rd;
1433
1434 rd = ks_rdreg16(ks, KS_MBIR);
1435
1436 if ((rd & both_done) != both_done) {
1437 netdev_warn(ks->netdev, "Memory selftest not finished\n");
1438 return 0;
1439 }
1440
1441 if (rd & MBIR_TXMBFA) {
1442 netdev_err(ks->netdev, "TX memory selftest fails\n");
1443 ret |= 1;
1444 }
1445
1446 if (rd & MBIR_RXMBFA) {
1447 netdev_err(ks->netdev, "RX memory selftest fails\n");
1448 ret |= 2;
1449 }
1450
1451 netdev_info(ks->netdev, "the selftest passes\n");
1452 return ret;
1453 }
1454
/**
 * ks_setup - program the chip's QMU and default PHY/filter configuration
 * @ks: The chip information
 *
 * Sets up TX/RX frame-pointer auto-increment, the one-frame receive
 * interrupt threshold, RXQ command control, the TX options (flow control,
 * padding, CRC, TCGIP — IP checksum generation, per the register name)
 * and the initial RX filtering mode, honouring any cached promiscuous or
 * all-multicast state.
 */
static void ks_setup(struct ks_net *ks)
{
	u16 w;

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

	/* Setup RxQ Command Control (RXQCR) */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/* Force half duplex as the default: if auto-negotiation fails,
	 * most switches fall back to half duplex.
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~P1MBCR_FORCE_FDX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	/* TX: flow control, padding, CRC generation, IP checksum. */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	/* RX: flow control, broadcast, unicast, multicast, IP checksum. */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous) /* bPromiscuous */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast) /* Multicast address passed mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else /* Normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
} /*ks_setup */
1500
1501
/**
 * ks_setup_int - prepare the chip's interrupt state
 * @ks: The chip information
 *
 * Acknowledges all pending interrupt status and caches the desired
 * interrupt-enable mask (link change, TX done, RX) in rc_ier.  This only
 * records the mask; it is written to the hardware elsewhere (presumably
 * ks_enable_int(), not visible here — confirm).
 */
static void ks_setup_int(struct ks_net *ks)
{
	ks->rc_ier = 0x00;
	/* Clear the interrupts status of the hardware. */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* Enables the interrupts of the hardware. */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
} /* ks_setup_int */
1511
/**
 * ks_hw_init - driver-state initialisation
 * @ks: The chip information
 *
 * Resets the cached filtering state, allocates the per-frame RX header
 * array and programs the default MAC address.  Returns true on success,
 * false when the header array cannot be allocated (declared int but used
 * as a boolean — callers must treat false as -ENOMEM).
 */
static int ks_hw_init(struct ks_net *ks)
{
#define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
	ks->promiscuous = 0;
	ks->all_mcast = 0;
	ks->mcast_lst_size = 0;

	/* One RX status/length slot per frame the chip can report. */
	ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
	if (!ks->frame_head_info)
		return false;

	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
	return true;
}
1526
1527
1528 static int ks8851_probe(struct platform_device *pdev)
1529 {
1530 int err = -ENOMEM;
1531 struct resource *io_d, *io_c;
1532 struct net_device *netdev;
1533 struct ks_net *ks;
1534 u16 id, data;
1535 struct ks8851_mll_platform_data *pdata;
1536
1537 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1538 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1539
1540 if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1541 goto err_mem_region;
1542
1543 if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1544 goto err_mem_region1;
1545
1546 netdev = alloc_etherdev(sizeof(struct ks_net));
1547 if (!netdev)
1548 goto err_alloc_etherdev;
1549
1550 SET_NETDEV_DEV(netdev, &pdev->dev);
1551
1552 ks = netdev_priv(netdev);
1553 ks->netdev = netdev;
1554 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1555
1556 if (!ks->hw_addr)
1557 goto err_ioremap;
1558
1559 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1560 if (!ks->hw_addr_cmd)
1561 goto err_ioremap1;
1562
1563 netdev->irq = platform_get_irq(pdev, 0);
1564
1565 if ((int)netdev->irq < 0) {
1566 err = netdev->irq;
1567 goto err_get_irq;
1568 }
1569
1570 ks->pdev = pdev;
1571
1572 mutex_init(&ks->lock);
1573 spin_lock_init(&ks->statelock);
1574
1575 netdev->netdev_ops = &ks_netdev_ops;
1576 netdev->ethtool_ops = &ks_ethtool_ops;
1577
1578 /* setup mii state */
1579 ks->mii.dev = netdev;
1580 ks->mii.phy_id = 1,
1581 ks->mii.phy_id_mask = 1;
1582 ks->mii.reg_num_mask = 0xf;
1583 ks->mii.mdio_read = ks_phy_read;
1584 ks->mii.mdio_write = ks_phy_write;
1585
1586 netdev_info(netdev, "message enable is %d\n", msg_enable);
1587 /* set the default message enable */
1588 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1589 NETIF_MSG_PROBE |
1590 NETIF_MSG_LINK));
1591 ks_read_config(ks);
1592
1593 /* simple check for a valid chip being connected to the bus */
1594 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1595 netdev_err(netdev, "failed to read device ID\n");
1596 err = -ENODEV;
1597 goto err_register;
1598 }
1599
1600 if (ks_read_selftest(ks)) {
1601 netdev_err(netdev, "failed to read device ID\n");
1602 err = -ENODEV;
1603 goto err_register;
1604 }
1605
1606 err = register_netdev(netdev);
1607 if (err)
1608 goto err_register;
1609
1610 platform_set_drvdata(pdev, netdev);
1611
1612 ks_soft_reset(ks, GRR_GSR);
1613 ks_hw_init(ks);
1614 ks_disable_qmu(ks);
1615 ks_setup(ks);
1616 ks_setup_int(ks);
1617
1618 data = ks_rdreg16(ks, KS_OBCR);
1619 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1620
1621 /* overwriting the default MAC address */
1622 pdata = pdev->dev.platform_data;
1623 if (!pdata) {
1624 netdev_err(netdev, "No platform data\n");
1625 err = -ENODEV;
1626 goto err_pdata;
1627 }
1628 memcpy(ks->mac_addr, pdata->mac_addr, 6);
1629 if (!is_valid_ether_addr(ks->mac_addr)) {
1630 /* Use random MAC address if none passed */
1631 eth_random_addr(ks->mac_addr);
1632 netdev_info(netdev, "Using random mac address\n");
1633 }
1634 netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1635
1636 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1637
1638 ks_set_mac(ks, netdev->dev_addr);
1639
1640 id = ks_rdreg16(ks, KS_CIDER);
1641
1642 netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1643 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1644 return 0;
1645
1646 err_pdata:
1647 unregister_netdev(netdev);
1648 err_register:
1649 err_get_irq:
1650 iounmap(ks->hw_addr_cmd);
1651 err_ioremap1:
1652 iounmap(ks->hw_addr);
1653 err_ioremap:
1654 free_netdev(netdev);
1655 err_alloc_etherdev:
1656 release_mem_region(io_c->start, resource_size(io_c));
1657 err_mem_region1:
1658 release_mem_region(io_d->start, resource_size(io_d));
1659 err_mem_region:
1660 return err;
1661 }
1662
1663 static int ks8851_remove(struct platform_device *pdev)
1664 {
1665 struct net_device *netdev = platform_get_drvdata(pdev);
1666 struct ks_net *ks = netdev_priv(netdev);
1667 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1668
1669 kfree(ks->frame_head_info);
1670 unregister_netdev(netdev);
1671 iounmap(ks->hw_addr);
1672 free_netdev(netdev);
1673 release_mem_region(iomem->start, resource_size(iomem));
1674 platform_set_drvdata(pdev, NULL);
1675 return 0;
1676
1677 }
1678
/* Platform glue: the driver binds to platform devices named DRV_NAME. */
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ks8851_probe,
	.remove = ks8851_remove,
};

/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(ks8851_platform_driver);
1689
MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
/* Log-verbosity module parameter, consumed via netif_msg_init() in probe. */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1695
This page took 0.092808 seconds and 5 git commands to generate.