/*
 * Copyright (C) 2002 Intersil Americas Inc.
 * Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
20 #include <linux/netdevice.h>
21 #include <linux/module.h>
22 #include <linux/pci.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
27 #include <linux/if_arp.h>
29 #include "prismcompat.h"
31 #include "islpci_mgt.h"
32 #include "isl_oid.h" /* additional types and defs for isl38xx fw */
33 #include "isl_ioctl.h"
35 #include <net/iw_handler.h>
37 /******************************************************************************
38 Global variable definition section
39 ******************************************************************************/
40 int pc_debug
= VERBOSE
;
41 module_param(pc_debug
, int, 0);
43 /******************************************************************************
44 Driver general functions
45 ******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
/*
 * Dump a buffer to the kernel log as a run of "[xx]" hex bytes.
 * No-op unless SHOW_BUFFER_CONTENTS is set in pc_debug; only compiled
 * in at high verbosity levels.
 */
void
display_buffer(char *buffer, int length)
{
	if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
		return;

	while (length > 0) {
		/* mask to 8 bits so sign-extended chars print as 00..ff */
		printk("[%02x]", *buffer & 255);
		length--;
		buffer++;
	}

	printk("\n");
}
#endif
63 /*****************************************************************************
64 Queue handling for management frames
65 ******************************************************************************/
68 * Helper function to create a PIMFOR management frame header.
71 pimfor_encode_header(int operation
, u32 oid
, u32 length
, pimfor_header_t
*h
)
73 h
->version
= PIMFOR_VERSION
;
74 h
->operation
= operation
;
75 h
->device_id
= PIMFOR_DEV_ID_MHLI_MIB
;
77 h
->oid
= cpu_to_be32(oid
);
78 h
->length
= cpu_to_be32(length
);
82 * Helper function to analyze a PIMFOR management frame header.
84 static pimfor_header_t
*
85 pimfor_decode_header(void *data
, int len
)
87 pimfor_header_t
*h
= data
;
89 while ((void *) h
< data
+ len
) {
90 if (h
->flags
& PIMFOR_FLAG_LITTLE_ENDIAN
) {
91 le32_to_cpus(&h
->oid
);
92 le32_to_cpus(&h
->length
);
94 be32_to_cpus(&h
->oid
);
95 be32_to_cpus(&h
->length
);
97 if (h
->oid
!= OID_INL_TUNNEL
)
105 * Fill the receive queue for management frames with fresh buffers.
108 islpci_mgmt_rx_fill(struct net_device
*ndev
)
110 islpci_private
*priv
= netdev_priv(ndev
);
111 isl38xx_control_block
*cb
= /* volatile not needed */
112 (isl38xx_control_block
*) priv
->control_block
;
113 u32 curr
= le32_to_cpu(cb
->driver_curr_frag
[ISL38XX_CB_RX_MGMTQ
]);
115 #if VERBOSE > SHOW_ERROR_MESSAGES
116 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgmt_rx_fill\n");
119 while (curr
- priv
->index_mgmt_rx
< ISL38XX_CB_MGMT_QSIZE
) {
120 u32 index
= curr
% ISL38XX_CB_MGMT_QSIZE
;
121 struct islpci_membuf
*buf
= &priv
->mgmt_rx
[index
];
122 isl38xx_fragment
*frag
= &cb
->rx_data_mgmt
[index
];
124 if (buf
->mem
== NULL
) {
125 buf
->mem
= kmalloc(MGMT_FRAME_SIZE
, GFP_ATOMIC
);
128 "Error allocating management frame.\n");
131 buf
->size
= MGMT_FRAME_SIZE
;
133 if (buf
->pci_addr
== 0) {
134 buf
->pci_addr
= pci_map_single(priv
->pdev
, buf
->mem
,
137 if (!buf
->pci_addr
) {
139 "Failed to make memory DMA'able.\n");
144 /* be safe: always reset control block information */
145 frag
->size
= cpu_to_le16(MGMT_FRAME_SIZE
);
147 frag
->address
= cpu_to_le32(buf
->pci_addr
);
150 /* The fragment address in the control block must have
151 * been written before announcing the frame buffer to
154 cb
->driver_curr_frag
[ISL38XX_CB_RX_MGMTQ
] = cpu_to_le32(curr
);
160 * Create and transmit a management frame using "operation" and "oid",
161 * with arguments data/length.
162 * We either return an error and free the frame, or we return 0 and
163 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
167 islpci_mgt_transmit(struct net_device
*ndev
, int operation
, unsigned long oid
,
168 void *data
, int length
)
170 islpci_private
*priv
= netdev_priv(ndev
);
171 isl38xx_control_block
*cb
=
172 (isl38xx_control_block
*) priv
->control_block
;
176 isl38xx_fragment
*frag
;
177 struct islpci_membuf buf
;
180 int frag_len
= length
+ PIMFOR_HEADER_SIZE
;
182 #if VERBOSE > SHOW_ERROR_MESSAGES
183 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgt_transmit\n");
186 if (frag_len
> MGMT_FRAME_SIZE
) {
187 printk(KERN_DEBUG
"%s: mgmt frame too large %d\n",
188 ndev
->name
, frag_len
);
193 p
= buf
.mem
= kmalloc(frag_len
, GFP_KERNEL
);
199 /* create the header directly in the fragment data area */
200 pimfor_encode_header(operation
, oid
, length
, (pimfor_header_t
*) p
);
201 p
+= PIMFOR_HEADER_SIZE
;
204 memcpy(p
, data
, length
);
206 memset(p
, 0, length
);
208 #if VERBOSE > SHOW_ERROR_MESSAGES
210 pimfor_header_t
*h
= buf
.mem
;
211 DEBUG(SHOW_PIMFOR_FRAMES
,
212 "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
213 h
->operation
, oid
, h
->device_id
, h
->flags
, length
);
215 /* display the buffer contents for debugging */
216 display_buffer((char *) h
, sizeof (pimfor_header_t
));
217 display_buffer(p
, length
);
222 buf
.pci_addr
= pci_map_single(priv
->pdev
, buf
.mem
, frag_len
,
225 printk(KERN_WARNING
"%s: cannot map PCI memory for mgmt\n",
230 /* Protect the control block modifications against interrupts. */
231 spin_lock_irqsave(&priv
->slock
, flags
);
232 curr_frag
= le32_to_cpu(cb
->driver_curr_frag
[ISL38XX_CB_TX_MGMTQ
]);
233 if (curr_frag
- priv
->index_mgmt_tx
>= ISL38XX_CB_MGMT_QSIZE
) {
234 printk(KERN_WARNING
"%s: mgmt tx queue is still full\n",
239 /* commit the frame to the tx device queue */
240 index
= curr_frag
% ISL38XX_CB_MGMT_QSIZE
;
241 priv
->mgmt_tx
[index
] = buf
;
242 frag
= &cb
->tx_data_mgmt
[index
];
243 frag
->size
= cpu_to_le16(frag_len
);
244 frag
->flags
= 0; /* for any other than the last fragment, set to 1 */
245 frag
->address
= cpu_to_le32(buf
.pci_addr
);
247 /* The fragment address in the control block must have
248 * been written before announcing the frame buffer to
251 cb
->driver_curr_frag
[ISL38XX_CB_TX_MGMTQ
] = cpu_to_le32(curr_frag
+ 1);
252 spin_unlock_irqrestore(&priv
->slock
, flags
);
254 /* trigger the device */
255 islpci_trigger(priv
);
259 spin_unlock_irqrestore(&priv
->slock
, flags
);
267 * Receive a management frame from the device.
268 * This can be an arbitrary number of traps, and at most one response
269 * frame for a previous request sent via islpci_mgt_transmit().
272 islpci_mgt_receive(struct net_device
*ndev
)
274 islpci_private
*priv
= netdev_priv(ndev
);
275 isl38xx_control_block
*cb
=
276 (isl38xx_control_block
*) priv
->control_block
;
279 #if VERBOSE > SHOW_ERROR_MESSAGES
280 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgt_receive\n");
283 /* Only once per interrupt, determine fragment range to
284 * process. This avoids an endless loop (i.e. lockup) if
285 * frames come in faster than we can process them. */
286 curr_frag
= le32_to_cpu(cb
->device_curr_frag
[ISL38XX_CB_RX_MGMTQ
]);
289 for (; priv
->index_mgmt_rx
< curr_frag
; priv
->index_mgmt_rx
++) {
290 pimfor_header_t
*header
;
291 u32 index
= priv
->index_mgmt_rx
% ISL38XX_CB_MGMT_QSIZE
;
292 struct islpci_membuf
*buf
= &priv
->mgmt_rx
[index
];
295 struct islpci_mgmtframe
*frame
;
297 /* I have no idea (and no documentation) if flags != 0
298 * is possible. Drop the frame, reuse the buffer. */
299 if (le16_to_cpu(cb
->rx_data_mgmt
[index
].flags
) != 0) {
300 printk(KERN_WARNING
"%s: unknown flags 0x%04x\n",
302 le16_to_cpu(cb
->rx_data_mgmt
[index
].flags
));
306 /* The device only returns the size of the header(s) here. */
307 frag_len
= le16_to_cpu(cb
->rx_data_mgmt
[index
].size
);
310 * We appear to have no way to tell the device the
311 * size of a receive buffer. Thus, if this check
312 * triggers, we likely have kernel heap corruption. */
313 if (frag_len
> MGMT_FRAME_SIZE
) {
315 "%s: Bogus packet size of %d (%#x).\n",
316 ndev
->name
, frag_len
, frag_len
);
317 frag_len
= MGMT_FRAME_SIZE
;
320 /* Ensure the results of device DMA are visible to the CPU. */
321 pci_dma_sync_single_for_cpu(priv
->pdev
, buf
->pci_addr
,
322 buf
->size
, PCI_DMA_FROMDEVICE
);
324 /* Perform endianess conversion for PIMFOR header in-place. */
325 header
= pimfor_decode_header(buf
->mem
, frag_len
);
327 printk(KERN_WARNING
"%s: no PIMFOR header found\n",
332 /* The device ID from the PIMFOR packet received from
333 * the MVC is always 0. We forward a sensible device_id.
334 * Not that anyone upstream would care... */
335 header
->device_id
= priv
->ndev
->ifindex
;
337 #if VERBOSE > SHOW_ERROR_MESSAGES
338 DEBUG(SHOW_PIMFOR_FRAMES
,
339 "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
340 header
->operation
, header
->oid
, header
->device_id
,
341 header
->flags
, header
->length
);
343 /* display the buffer contents for debugging */
344 display_buffer((char *) header
, PIMFOR_HEADER_SIZE
);
345 display_buffer((char *) header
+ PIMFOR_HEADER_SIZE
,
349 /* nobody sends these */
350 if (header
->flags
& PIMFOR_FLAG_APPLIC_ORIGIN
) {
352 "%s: errant PIMFOR application frame\n",
357 /* Determine frame size, skipping OID_INL_TUNNEL headers. */
358 size
= PIMFOR_HEADER_SIZE
+ header
->length
;
359 frame
= kmalloc(sizeof (struct islpci_mgmtframe
) + size
,
363 "%s: Out of memory, cannot handle oid 0x%08x\n",
364 ndev
->name
, header
->oid
);
368 memcpy(&frame
->buf
, header
, size
);
369 frame
->header
= (pimfor_header_t
*) frame
->buf
;
370 frame
->data
= frame
->buf
+ PIMFOR_HEADER_SIZE
;
372 #if VERBOSE > SHOW_ERROR_MESSAGES
373 DEBUG(SHOW_PIMFOR_FRAMES
,
374 "frame: header: %p, data: %p, size: %d\n",
375 frame
->header
, frame
->data
, size
);
378 if (header
->operation
== PIMFOR_OP_TRAP
) {
379 #if VERBOSE > SHOW_ERROR_MESSAGES
381 "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
382 header
->oid
, header
->device_id
, header
->flags
,
386 /* Create work to handle trap out of interrupt
388 INIT_WORK(&frame
->ws
, prism54_process_trap
);
389 schedule_work(&frame
->ws
);
392 /* Signal the one waiting process that a response
393 * has been received. */
394 if ((frame
= xchg(&priv
->mgmt_received
, frame
)) != NULL
) {
396 "%s: mgmt response not collected\n",
400 #if VERBOSE > SHOW_ERROR_MESSAGES
401 DEBUG(SHOW_TRACING
, "Wake up Mgmt Queue\n");
403 wake_up(&priv
->mgmt_wqueue
);
412 * Cleanup the transmit queue by freeing all frames handled by the device.
415 islpci_mgt_cleanup_transmit(struct net_device
*ndev
)
417 islpci_private
*priv
= netdev_priv(ndev
);
418 isl38xx_control_block
*cb
= /* volatile not needed */
419 (isl38xx_control_block
*) priv
->control_block
;
422 #if VERBOSE > SHOW_ERROR_MESSAGES
423 DEBUG(SHOW_FUNCTION_CALLS
, "islpci_mgt_cleanup_transmit\n");
426 /* Only once per cleanup, determine fragment range to
427 * process. This avoids an endless loop (i.e. lockup) if
428 * the device became confused, incrementing device_curr_frag
430 curr_frag
= le32_to_cpu(cb
->device_curr_frag
[ISL38XX_CB_TX_MGMTQ
]);
433 for (; priv
->index_mgmt_tx
< curr_frag
; priv
->index_mgmt_tx
++) {
434 int index
= priv
->index_mgmt_tx
% ISL38XX_CB_MGMT_QSIZE
;
435 struct islpci_membuf
*buf
= &priv
->mgmt_tx
[index
];
436 pci_unmap_single(priv
->pdev
, buf
->pci_addr
, buf
->size
,
446 * Perform one request-response transaction to the device.
449 islpci_mgt_transaction(struct net_device
*ndev
,
450 int operation
, unsigned long oid
,
451 void *senddata
, int sendlen
,
452 struct islpci_mgmtframe
**recvframe
)
454 islpci_private
*priv
= netdev_priv(ndev
);
455 const long wait_cycle_jiffies
= msecs_to_jiffies(ISL38XX_WAIT_CYCLE
* 10);
456 long timeout_left
= ISL38XX_MAX_WAIT_CYCLES
* wait_cycle_jiffies
;
462 if (mutex_lock_interruptible(&priv
->mgmt_lock
))
465 prepare_to_wait(&priv
->mgmt_wqueue
, &wait
, TASK_UNINTERRUPTIBLE
);
466 err
= islpci_mgt_transmit(ndev
, operation
, oid
, senddata
, sendlen
);
471 while (timeout_left
> 0) {
473 struct islpci_mgmtframe
*frame
;
475 timeleft
= schedule_timeout_uninterruptible(wait_cycle_jiffies
);
476 frame
= xchg(&priv
->mgmt_received
, NULL
);
478 if (frame
->header
->oid
== oid
) {
484 "%s: expecting oid 0x%x, received 0x%x.\n",
485 ndev
->name
, (unsigned int) oid
,
493 "%s: timeout waiting for mgmt response %lu, "
494 "triggering device\n",
495 ndev
->name
, timeout_left
);
496 islpci_trigger(priv
);
498 timeout_left
+= timeleft
- wait_cycle_jiffies
;
500 printk(KERN_WARNING
"%s: timeout waiting for mgmt response\n",
503 /* TODO: we should reset the device here */
505 finish_wait(&priv
->mgmt_wqueue
, &wait
);
506 mutex_unlock(&priv
->mgmt_lock
);