/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "fwil_types.h"
#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
34 MODULE_AUTHOR("Broadcom Corporation");
35 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
36 MODULE_LICENSE("Dual BSD/GPL");
38 #define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
40 /* AMPDU rx reordering definitions */
41 #define BRCMF_RXREORDER_FLOWID_OFFSET 0
42 #define BRCMF_RXREORDER_MAXIDX_OFFSET 2
43 #define BRCMF_RXREORDER_FLAGS_OFFSET 4
44 #define BRCMF_RXREORDER_CURIDX_OFFSET 6
45 #define BRCMF_RXREORDER_EXPIDX_OFFSET 8
47 #define BRCMF_RXREORDER_DEL_FLOW 0x01
48 #define BRCMF_RXREORDER_FLUSH_ALL 0x02
49 #define BRCMF_RXREORDER_CURIDX_VALID 0x04
50 #define BRCMF_RXREORDER_EXPIDX_VALID 0x08
51 #define BRCMF_RXREORDER_NEW_HOLE 0x10
55 module_param_named(debug
, brcmf_msg_level
, int, S_IRUSR
| S_IWUSR
);
56 MODULE_PARM_DESC(debug
, "level of debug output");
59 static int brcmf_p2p_enable
;
61 module_param_named(p2pon
, brcmf_p2p_enable
, int, 0);
62 MODULE_PARM_DESC(p2pon
, "enable p2p management functionality");
65 char *brcmf_ifname(struct brcmf_pub
*drvr
, int ifidx
)
67 if (ifidx
< 0 || ifidx
>= BRCMF_MAX_IFS
) {
68 brcmf_err("ifidx %d out of range\n", ifidx
);
72 if (drvr
->iflist
[ifidx
] == NULL
) {
73 brcmf_err("null i/f %d\n", ifidx
);
77 if (drvr
->iflist
[ifidx
]->ndev
)
78 return drvr
->iflist
[ifidx
]->ndev
->name
;
83 static void _brcmf_set_multicast_list(struct work_struct
*work
)
86 struct net_device
*ndev
;
87 struct netdev_hw_addr
*ha
;
94 ifp
= container_of(work
, struct brcmf_if
, multicast_work
);
96 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
100 /* Determine initial value of allmulti flag */
101 cmd_value
= (ndev
->flags
& IFF_ALLMULTI
) ? true : false;
103 /* Send down the multicast list first. */
104 cnt
= netdev_mc_count(ndev
);
105 buflen
= sizeof(cnt
) + (cnt
* ETH_ALEN
);
106 buf
= kmalloc(buflen
, GFP_ATOMIC
);
111 cnt_le
= cpu_to_le32(cnt
);
112 memcpy(bufp
, &cnt_le
, sizeof(cnt_le
));
113 bufp
+= sizeof(cnt_le
);
115 netdev_for_each_mc_addr(ha
, ndev
) {
118 memcpy(bufp
, ha
->addr
, ETH_ALEN
);
123 err
= brcmf_fil_iovar_data_set(ifp
, "mcast_list", buf
, buflen
);
125 brcmf_err("Setting mcast_list failed, %d\n", err
);
126 cmd_value
= cnt
? true : cmd_value
;
132 * Now send the allmulti setting. This is based on the setting in the
133 * net_device flags, but might be modified above to be turned on if we
134 * were trying to set some addresses and dongle rejected it...
136 err
= brcmf_fil_iovar_int_set(ifp
, "allmulti", cmd_value
);
138 brcmf_err("Setting allmulti failed, %d\n", err
);
140 /*Finally, pick up the PROMISC flag */
141 cmd_value
= (ndev
->flags
& IFF_PROMISC
) ? true : false;
142 err
= brcmf_fil_cmd_int_set(ifp
, BRCMF_C_SET_PROMISC
, cmd_value
);
144 brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
149 _brcmf_set_mac_address(struct work_struct
*work
)
151 struct brcmf_if
*ifp
;
154 ifp
= container_of(work
, struct brcmf_if
, setmacaddr_work
);
156 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
158 err
= brcmf_fil_iovar_data_set(ifp
, "cur_etheraddr", ifp
->mac_addr
,
161 brcmf_err("Setting cur_etheraddr failed, %d\n", err
);
163 brcmf_dbg(TRACE
, "MAC address updated to %pM\n",
165 memcpy(ifp
->ndev
->dev_addr
, ifp
->mac_addr
, ETH_ALEN
);
169 static int brcmf_netdev_set_mac_address(struct net_device
*ndev
, void *addr
)
171 struct brcmf_if
*ifp
= netdev_priv(ndev
);
172 struct sockaddr
*sa
= (struct sockaddr
*)addr
;
174 memcpy(&ifp
->mac_addr
, sa
->sa_data
, ETH_ALEN
);
175 schedule_work(&ifp
->setmacaddr_work
);
179 static void brcmf_netdev_set_multicast_list(struct net_device
*ndev
)
181 struct brcmf_if
*ifp
= netdev_priv(ndev
);
183 schedule_work(&ifp
->multicast_work
);
186 static netdev_tx_t
brcmf_netdev_start_xmit(struct sk_buff
*skb
,
187 struct net_device
*ndev
)
190 struct brcmf_if
*ifp
= netdev_priv(ndev
);
191 struct brcmf_pub
*drvr
= ifp
->drvr
;
194 brcmf_dbg(DATA
, "Enter, idx=%d\n", ifp
->bssidx
);
196 /* Can the device send data? */
197 if (drvr
->bus_if
->state
!= BRCMF_BUS_DATA
) {
198 brcmf_err("xmit rejected state=%d\n", drvr
->bus_if
->state
);
199 netif_stop_queue(ndev
);
205 if (!drvr
->iflist
[ifp
->bssidx
]) {
206 brcmf_err("bad ifidx %d\n", ifp
->bssidx
);
207 netif_stop_queue(ndev
);
213 /* Make sure there's enough room for any header */
214 if (skb_headroom(skb
) < drvr
->hdrlen
) {
215 struct sk_buff
*skb2
;
217 brcmf_dbg(INFO
, "%s: insufficient headroom\n",
218 brcmf_ifname(drvr
, ifp
->bssidx
));
219 drvr
->bus_if
->tx_realloc
++;
220 skb2
= skb_realloc_headroom(skb
, drvr
->hdrlen
);
224 brcmf_err("%s: skb_realloc_headroom failed\n",
225 brcmf_ifname(drvr
, ifp
->bssidx
));
231 /* validate length for ether packet */
232 if (skb
->len
< sizeof(*eh
)) {
238 ret
= brcmf_fws_process_skb(ifp
, skb
);
242 ifp
->stats
.tx_dropped
++;
244 ifp
->stats
.tx_packets
++;
245 ifp
->stats
.tx_bytes
+= skb
->len
;
248 /* Return ok: we always eat the packet */
252 void brcmf_txflowblock_if(struct brcmf_if
*ifp
,
253 enum brcmf_netif_stop_reason reason
, bool state
)
257 if (!ifp
|| !ifp
->ndev
)
260 brcmf_dbg(TRACE
, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
261 ifp
->bssidx
, ifp
->netif_stop
, reason
, state
);
263 spin_lock_irqsave(&ifp
->netif_stop_lock
, flags
);
265 if (!ifp
->netif_stop
)
266 netif_stop_queue(ifp
->ndev
);
267 ifp
->netif_stop
|= reason
;
269 ifp
->netif_stop
&= ~reason
;
270 if (!ifp
->netif_stop
)
271 netif_wake_queue(ifp
->ndev
);
273 spin_unlock_irqrestore(&ifp
->netif_stop_lock
, flags
);
276 void brcmf_txflowblock(struct device
*dev
, bool state
)
278 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
279 struct brcmf_pub
*drvr
= bus_if
->drvr
;
281 brcmf_dbg(TRACE
, "Enter\n");
283 brcmf_fws_bus_blocked(drvr
, state
);
286 static void brcmf_netif_rx(struct brcmf_if
*ifp
, struct sk_buff
*skb
)
288 skb
->dev
= ifp
->ndev
;
289 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
291 if (skb
->pkt_type
== PACKET_MULTICAST
)
292 ifp
->stats
.multicast
++;
294 /* Process special event packets */
295 brcmf_fweh_process_skb(ifp
->drvr
, skb
);
297 if (!(ifp
->ndev
->flags
& IFF_UP
)) {
298 brcmu_pkt_buf_free_skb(skb
);
302 ifp
->stats
.rx_bytes
+= skb
->len
;
303 ifp
->stats
.rx_packets
++;
305 brcmf_dbg(DATA
, "rx proto=0x%X\n", ntohs(skb
->protocol
));
309 /* If the receive is not processed inside an ISR,
310 * the softirqd must be woken explicitly to service
311 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
316 static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder
*rfi
,
318 struct sk_buff_head
*skb_list
)
320 /* initialize return list */
321 __skb_queue_head_init(skb_list
);
323 if (rfi
->pend_pkts
== 0) {
324 brcmf_dbg(INFO
, "no packets in reorder queue\n");
329 if (rfi
->pktslots
[start
]) {
330 __skb_queue_tail(skb_list
, rfi
->pktslots
[start
]);
331 rfi
->pktslots
[start
] = NULL
;
334 if (start
> rfi
->max_idx
)
336 } while (start
!= end
);
337 rfi
->pend_pkts
-= skb_queue_len(skb_list
);
340 static void brcmf_rxreorder_process_info(struct brcmf_if
*ifp
, u8
*reorder_data
,
343 u8 flow_id
, max_idx
, cur_idx
, exp_idx
, end_idx
;
344 struct brcmf_ampdu_rx_reorder
*rfi
;
345 struct sk_buff_head reorder_list
;
346 struct sk_buff
*pnext
;
350 flow_id
= reorder_data
[BRCMF_RXREORDER_FLOWID_OFFSET
];
351 flags
= reorder_data
[BRCMF_RXREORDER_FLAGS_OFFSET
];
353 /* validate flags and flow id */
355 brcmf_err("invalid flags...so ignore this packet\n");
356 brcmf_netif_rx(ifp
, pkt
);
360 rfi
= ifp
->drvr
->reorder_flows
[flow_id
];
361 if (flags
& BRCMF_RXREORDER_DEL_FLOW
) {
362 brcmf_dbg(INFO
, "flow-%d: delete\n",
366 brcmf_dbg(INFO
, "received flags to cleanup, but no flow (%d) yet\n",
368 brcmf_netif_rx(ifp
, pkt
);
372 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
, rfi
->exp_idx
,
374 /* add the last packet */
375 __skb_queue_tail(&reorder_list
, pkt
);
377 ifp
->drvr
->reorder_flows
[flow_id
] = NULL
;
380 /* from here on we need a flow reorder instance */
382 buf_size
= sizeof(*rfi
);
383 max_idx
= reorder_data
[BRCMF_RXREORDER_MAXIDX_OFFSET
];
385 buf_size
+= (max_idx
+ 1) * sizeof(pkt
);
387 /* allocate space for flow reorder info */
388 brcmf_dbg(INFO
, "flow-%d: start, maxidx %d\n",
390 rfi
= kzalloc(buf_size
, GFP_ATOMIC
);
392 brcmf_err("failed to alloc buffer\n");
393 brcmf_netif_rx(ifp
, pkt
);
397 ifp
->drvr
->reorder_flows
[flow_id
] = rfi
;
398 rfi
->pktslots
= (struct sk_buff
**)(rfi
+1);
399 rfi
->max_idx
= max_idx
;
401 if (flags
& BRCMF_RXREORDER_NEW_HOLE
) {
402 if (rfi
->pend_pkts
) {
403 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
,
406 WARN_ON(rfi
->pend_pkts
);
408 __skb_queue_head_init(&reorder_list
);
410 rfi
->cur_idx
= reorder_data
[BRCMF_RXREORDER_CURIDX_OFFSET
];
411 rfi
->exp_idx
= reorder_data
[BRCMF_RXREORDER_EXPIDX_OFFSET
];
412 rfi
->max_idx
= reorder_data
[BRCMF_RXREORDER_MAXIDX_OFFSET
];
413 rfi
->pktslots
[rfi
->cur_idx
] = pkt
;
415 brcmf_dbg(DATA
, "flow-%d: new hole %d (%d), pending %d\n",
416 flow_id
, rfi
->cur_idx
, rfi
->exp_idx
, rfi
->pend_pkts
);
417 } else if (flags
& BRCMF_RXREORDER_CURIDX_VALID
) {
418 cur_idx
= reorder_data
[BRCMF_RXREORDER_CURIDX_OFFSET
];
419 exp_idx
= reorder_data
[BRCMF_RXREORDER_EXPIDX_OFFSET
];
421 if ((exp_idx
== rfi
->exp_idx
) && (cur_idx
!= rfi
->exp_idx
)) {
422 /* still in the current hole */
423 /* enqueue the current on the buffer chain */
424 if (rfi
->pktslots
[cur_idx
] != NULL
) {
425 brcmf_dbg(INFO
, "HOLE: ERROR buffer pending..free it\n");
426 brcmu_pkt_buf_free_skb(rfi
->pktslots
[cur_idx
]);
427 rfi
->pktslots
[cur_idx
] = NULL
;
429 rfi
->pktslots
[cur_idx
] = pkt
;
431 rfi
->cur_idx
= cur_idx
;
432 brcmf_dbg(DATA
, "flow-%d: store pkt %d (%d), pending %d\n",
433 flow_id
, cur_idx
, exp_idx
, rfi
->pend_pkts
);
435 /* can return now as there is no reorder
440 if (rfi
->exp_idx
== cur_idx
) {
441 if (rfi
->pktslots
[cur_idx
] != NULL
) {
442 brcmf_dbg(INFO
, "error buffer pending..free it\n");
443 brcmu_pkt_buf_free_skb(rfi
->pktslots
[cur_idx
]);
444 rfi
->pktslots
[cur_idx
] = NULL
;
446 rfi
->pktslots
[cur_idx
] = pkt
;
449 /* got the expected one. flush from current to expected
450 * and update expected
452 brcmf_dbg(DATA
, "flow-%d: expected %d (%d), pending %d\n",
453 flow_id
, cur_idx
, exp_idx
, rfi
->pend_pkts
);
455 rfi
->cur_idx
= cur_idx
;
456 rfi
->exp_idx
= exp_idx
;
458 brcmf_rxreorder_get_skb_list(rfi
, cur_idx
, exp_idx
,
460 brcmf_dbg(DATA
, "flow-%d: freeing buffers %d, pending %d\n",
461 flow_id
, skb_queue_len(&reorder_list
),
466 brcmf_dbg(DATA
, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
467 flow_id
, flags
, rfi
->cur_idx
, rfi
->exp_idx
,
469 if (flags
& BRCMF_RXREORDER_FLUSH_ALL
)
470 end_idx
= rfi
->exp_idx
;
474 /* flush pkts first */
475 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
, end_idx
,
478 if (exp_idx
== ((cur_idx
+ 1) % (rfi
->max_idx
+ 1))) {
479 __skb_queue_tail(&reorder_list
, pkt
);
481 rfi
->pktslots
[cur_idx
] = pkt
;
484 rfi
->exp_idx
= exp_idx
;
485 rfi
->cur_idx
= cur_idx
;
488 /* explicity window move updating the expected index */
489 exp_idx
= reorder_data
[BRCMF_RXREORDER_EXPIDX_OFFSET
];
491 brcmf_dbg(DATA
, "flow-%d (0x%x): change expected: %d -> %d\n",
492 flow_id
, flags
, rfi
->exp_idx
, exp_idx
);
493 if (flags
& BRCMF_RXREORDER_FLUSH_ALL
)
494 end_idx
= rfi
->exp_idx
;
498 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
, end_idx
,
500 __skb_queue_tail(&reorder_list
, pkt
);
501 /* set the new expected idx */
502 rfi
->exp_idx
= exp_idx
;
505 skb_queue_walk_safe(&reorder_list
, pkt
, pnext
) {
506 __skb_unlink(pkt
, &reorder_list
);
507 brcmf_netif_rx(ifp
, pkt
);
511 void brcmf_rx_frame(struct device
*dev
, struct sk_buff
*skb
)
513 struct brcmf_if
*ifp
;
514 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
515 struct brcmf_pub
*drvr
= bus_if
->drvr
;
516 struct brcmf_skb_reorder_data
*rd
;
520 brcmf_dbg(DATA
, "Enter: %s: rxp=%p\n", dev_name(dev
), skb
);
522 /* process and remove protocol-specific header */
523 ret
= brcmf_proto_hdrpull(drvr
, true, &ifidx
, skb
);
524 ifp
= drvr
->iflist
[ifidx
];
526 if (ret
|| !ifp
|| !ifp
->ndev
) {
527 if ((ret
!= -ENODATA
) && ifp
)
528 ifp
->stats
.rx_errors
++;
529 brcmu_pkt_buf_free_skb(skb
);
533 rd
= (struct brcmf_skb_reorder_data
*)skb
->cb
;
535 brcmf_rxreorder_process_info(ifp
, rd
->reorder
, skb
);
537 brcmf_netif_rx(ifp
, skb
);
540 void brcmf_txfinalize(struct brcmf_pub
*drvr
, struct sk_buff
*txp
,
543 struct brcmf_if
*ifp
;
549 res
= brcmf_proto_hdrpull(drvr
, false, &ifidx
, txp
);
551 ifp
= drvr
->iflist
[ifidx
];
556 eh
= (struct ethhdr
*)(txp
->data
);
557 type
= ntohs(eh
->h_proto
);
559 if (type
== ETH_P_PAE
) {
560 atomic_dec(&ifp
->pend_8021x_cnt
);
561 if (waitqueue_active(&ifp
->pend_8021x_wait
))
562 wake_up(&ifp
->pend_8021x_wait
);
566 ifp
->stats
.tx_errors
++;
568 brcmu_pkt_buf_free_skb(txp
);
571 void brcmf_txcomplete(struct device
*dev
, struct sk_buff
*txp
, bool success
)
573 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
574 struct brcmf_pub
*drvr
= bus_if
->drvr
;
576 /* await txstatus signal for firmware if active */
577 if (brcmf_fws_fc_active(drvr
->fws
)) {
579 brcmf_fws_bustxfail(drvr
->fws
, txp
);
581 brcmf_txfinalize(drvr
, txp
, success
);
585 static struct net_device_stats
*brcmf_netdev_get_stats(struct net_device
*ndev
)
587 struct brcmf_if
*ifp
= netdev_priv(ndev
);
589 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
595 * Set current toe component enables in toe_ol iovar,
596 * and set toe global enable iovar
598 static int brcmf_toe_set(struct brcmf_if
*ifp
, u32 toe_ol
)
602 err
= brcmf_fil_iovar_int_set(ifp
, "toe_ol", toe_ol
);
604 brcmf_err("Setting toe_ol failed, %d\n", err
);
608 err
= brcmf_fil_iovar_int_set(ifp
, "toe", (toe_ol
!= 0));
610 brcmf_err("Setting toe failed, %d\n", err
);
616 static void brcmf_ethtool_get_drvinfo(struct net_device
*ndev
,
617 struct ethtool_drvinfo
*info
)
619 struct brcmf_if
*ifp
= netdev_priv(ndev
);
620 struct brcmf_pub
*drvr
= ifp
->drvr
;
622 strlcpy(info
->driver
, KBUILD_MODNAME
, sizeof(info
->driver
));
623 snprintf(info
->version
, sizeof(info
->version
), "%lu",
625 strlcpy(info
->bus_info
, dev_name(drvr
->bus_if
->dev
),
626 sizeof(info
->bus_info
));
629 static const struct ethtool_ops brcmf_ethtool_ops
= {
630 .get_drvinfo
= brcmf_ethtool_get_drvinfo
,
633 static int brcmf_ethtool(struct brcmf_if
*ifp
, void __user
*uaddr
)
635 struct brcmf_pub
*drvr
= ifp
->drvr
;
636 struct ethtool_drvinfo info
;
637 char drvname
[sizeof(info
.driver
)];
639 struct ethtool_value edata
;
640 u32 toe_cmpnt
, csum_dir
;
643 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
645 /* all ethtool calls start with a cmd word */
646 if (copy_from_user(&cmd
, uaddr
, sizeof(u32
)))
650 case ETHTOOL_GDRVINFO
:
651 /* Copy out any request driver name */
652 if (copy_from_user(&info
, uaddr
, sizeof(info
)))
654 strncpy(drvname
, info
.driver
, sizeof(info
.driver
));
655 drvname
[sizeof(info
.driver
) - 1] = '\0';
657 /* clear struct for return */
658 memset(&info
, 0, sizeof(info
));
661 /* if requested, identify ourselves */
662 if (strcmp(drvname
, "?dhd") == 0) {
663 sprintf(info
.driver
, "dhd");
664 strcpy(info
.version
, BRCMF_VERSION_STR
);
666 /* report dongle driver type */
668 sprintf(info
.driver
, "wl");
670 sprintf(info
.version
, "%lu", drvr
->drv_version
);
671 if (copy_to_user(uaddr
, &info
, sizeof(info
)))
673 brcmf_dbg(TRACE
, "given %*s, returning %s\n",
674 (int)sizeof(drvname
), drvname
, info
.driver
);
677 /* Get toe offload components from dongle */
678 case ETHTOOL_GRXCSUM
:
679 case ETHTOOL_GTXCSUM
:
680 ret
= brcmf_fil_iovar_int_get(ifp
, "toe_ol", &toe_cmpnt
);
685 (cmd
== ETHTOOL_GTXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
688 edata
.data
= (toe_cmpnt
& csum_dir
) ? 1 : 0;
690 if (copy_to_user(uaddr
, &edata
, sizeof(edata
)))
694 /* Set toe offload components in dongle */
695 case ETHTOOL_SRXCSUM
:
696 case ETHTOOL_STXCSUM
:
697 if (copy_from_user(&edata
, uaddr
, sizeof(edata
)))
700 /* Read the current settings, update and write back */
701 ret
= brcmf_fil_iovar_int_get(ifp
, "toe_ol", &toe_cmpnt
);
706 (cmd
== ETHTOOL_STXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
709 toe_cmpnt
|= csum_dir
;
711 toe_cmpnt
&= ~csum_dir
;
713 ret
= brcmf_toe_set(ifp
, toe_cmpnt
);
717 /* If setting TX checksum mode, tell Linux the new mode */
718 if (cmd
== ETHTOOL_STXCSUM
) {
720 ifp
->ndev
->features
|= NETIF_F_IP_CSUM
;
722 ifp
->ndev
->features
&= ~NETIF_F_IP_CSUM
;
734 static int brcmf_netdev_ioctl_entry(struct net_device
*ndev
, struct ifreq
*ifr
,
737 struct brcmf_if
*ifp
= netdev_priv(ndev
);
738 struct brcmf_pub
*drvr
= ifp
->drvr
;
740 brcmf_dbg(TRACE
, "Enter, idx=%d, cmd=0x%04x\n", ifp
->bssidx
, cmd
);
742 if (!drvr
->iflist
[ifp
->bssidx
])
745 if (cmd
== SIOCETHTOOL
)
746 return brcmf_ethtool(ifp
, ifr
->ifr_data
);
751 static int brcmf_netdev_stop(struct net_device
*ndev
)
753 struct brcmf_if
*ifp
= netdev_priv(ndev
);
755 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
757 brcmf_cfg80211_down(ndev
);
759 /* Set state and stop OS transmissions */
760 netif_stop_queue(ndev
);
765 static int brcmf_netdev_open(struct net_device
*ndev
)
767 struct brcmf_if
*ifp
= netdev_priv(ndev
);
768 struct brcmf_pub
*drvr
= ifp
->drvr
;
769 struct brcmf_bus
*bus_if
= drvr
->bus_if
;
772 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
774 /* If bus is not ready, can't continue */
775 if (bus_if
->state
!= BRCMF_BUS_DATA
) {
776 brcmf_err("failed bus is not ready\n");
780 atomic_set(&ifp
->pend_8021x_cnt
, 0);
782 /* Get current TOE mode from dongle */
783 if (brcmf_fil_iovar_int_get(ifp
, "toe_ol", &toe_ol
) >= 0
784 && (toe_ol
& TOE_TX_CSUM_OL
) != 0)
785 ndev
->features
|= NETIF_F_IP_CSUM
;
787 ndev
->features
&= ~NETIF_F_IP_CSUM
;
789 if (brcmf_cfg80211_up(ndev
)) {
790 brcmf_err("failed to bring up cfg80211\n");
794 /* Allow transmit calls */
795 netif_start_queue(ndev
);
799 static const struct net_device_ops brcmf_netdev_ops_pri
= {
800 .ndo_open
= brcmf_netdev_open
,
801 .ndo_stop
= brcmf_netdev_stop
,
802 .ndo_get_stats
= brcmf_netdev_get_stats
,
803 .ndo_do_ioctl
= brcmf_netdev_ioctl_entry
,
804 .ndo_start_xmit
= brcmf_netdev_start_xmit
,
805 .ndo_set_mac_address
= brcmf_netdev_set_mac_address
,
806 .ndo_set_rx_mode
= brcmf_netdev_set_multicast_list
809 int brcmf_net_attach(struct brcmf_if
*ifp
, bool rtnl_locked
)
811 struct brcmf_pub
*drvr
= ifp
->drvr
;
812 struct net_device
*ndev
;
815 brcmf_dbg(TRACE
, "Enter, idx=%d mac=%pM\n", ifp
->bssidx
,
819 /* set appropriate operations */
820 ndev
->netdev_ops
= &brcmf_netdev_ops_pri
;
822 ndev
->hard_header_len
+= drvr
->hdrlen
;
823 ndev
->ethtool_ops
= &brcmf_ethtool_ops
;
825 drvr
->rxsz
= ndev
->mtu
+ ndev
->hard_header_len
+
828 /* set the mac address */
829 memcpy(ndev
->dev_addr
, ifp
->mac_addr
, ETH_ALEN
);
831 INIT_WORK(&ifp
->setmacaddr_work
, _brcmf_set_mac_address
);
832 INIT_WORK(&ifp
->multicast_work
, _brcmf_set_multicast_list
);
835 err
= register_netdevice(ndev
);
837 err
= register_netdev(ndev
);
839 brcmf_err("couldn't register the net device\n");
843 brcmf_dbg(INFO
, "%s: Broadcom Dongle Host Driver\n", ndev
->name
);
845 ndev
->destructor
= free_netdev
;
849 drvr
->iflist
[ifp
->bssidx
] = NULL
;
850 ndev
->netdev_ops
= NULL
;
855 static int brcmf_net_p2p_open(struct net_device
*ndev
)
857 brcmf_dbg(TRACE
, "Enter\n");
859 return brcmf_cfg80211_up(ndev
);
862 static int brcmf_net_p2p_stop(struct net_device
*ndev
)
864 brcmf_dbg(TRACE
, "Enter\n");
866 return brcmf_cfg80211_down(ndev
);
869 static int brcmf_net_p2p_do_ioctl(struct net_device
*ndev
,
870 struct ifreq
*ifr
, int cmd
)
872 brcmf_dbg(TRACE
, "Enter\n");
876 static netdev_tx_t
brcmf_net_p2p_start_xmit(struct sk_buff
*skb
,
877 struct net_device
*ndev
)
880 dev_kfree_skb_any(skb
);
885 static const struct net_device_ops brcmf_netdev_ops_p2p
= {
886 .ndo_open
= brcmf_net_p2p_open
,
887 .ndo_stop
= brcmf_net_p2p_stop
,
888 .ndo_do_ioctl
= brcmf_net_p2p_do_ioctl
,
889 .ndo_start_xmit
= brcmf_net_p2p_start_xmit
892 static int brcmf_net_p2p_attach(struct brcmf_if
*ifp
)
894 struct net_device
*ndev
;
896 brcmf_dbg(TRACE
, "Enter, idx=%d mac=%pM\n", ifp
->bssidx
,
900 ndev
->netdev_ops
= &brcmf_netdev_ops_p2p
;
902 /* set the mac address */
903 memcpy(ndev
->dev_addr
, ifp
->mac_addr
, ETH_ALEN
);
905 if (register_netdev(ndev
) != 0) {
906 brcmf_err("couldn't register the p2p net device\n");
910 brcmf_dbg(INFO
, "%s: Broadcom Dongle Host Driver\n", ndev
->name
);
915 ifp
->drvr
->iflist
[ifp
->bssidx
] = NULL
;
916 ndev
->netdev_ops
= NULL
;
921 struct brcmf_if
*brcmf_add_if(struct brcmf_pub
*drvr
, s32 bssidx
, s32 ifidx
,
922 char *name
, u8
*mac_addr
)
924 struct brcmf_if
*ifp
;
925 struct net_device
*ndev
;
927 brcmf_dbg(TRACE
, "Enter, idx=%d, ifidx=%d\n", bssidx
, ifidx
);
929 ifp
= drvr
->iflist
[bssidx
];
931 * Delete the existing interface before overwriting it
932 * in case we missed the BRCMF_E_IF_DEL event.
935 brcmf_err("ERROR: netdev:%s already exists\n",
938 netif_stop_queue(ifp
->ndev
);
939 unregister_netdev(ifp
->ndev
);
940 free_netdev(ifp
->ndev
);
941 drvr
->iflist
[bssidx
] = NULL
;
943 brcmf_err("ignore IF event\n");
944 return ERR_PTR(-EINVAL
);
948 if (!brcmf_p2p_enable
&& bssidx
== 1) {
949 /* this is P2P_DEVICE interface */
950 brcmf_dbg(INFO
, "allocate non-netdev interface\n");
951 ifp
= kzalloc(sizeof(*ifp
), GFP_KERNEL
);
953 return ERR_PTR(-ENOMEM
);
955 brcmf_dbg(INFO
, "allocate netdev interface\n");
956 /* Allocate netdev, including space for private structure */
957 ndev
= alloc_netdev(sizeof(*ifp
), name
, ether_setup
);
959 return ERR_PTR(-ENOMEM
);
961 ifp
= netdev_priv(ndev
);
966 drvr
->iflist
[bssidx
] = ifp
;
968 ifp
->bssidx
= bssidx
;
970 init_waitqueue_head(&ifp
->pend_8021x_wait
);
971 spin_lock_init(&ifp
->netif_stop_lock
);
973 if (mac_addr
!= NULL
)
974 memcpy(ifp
->mac_addr
, mac_addr
, ETH_ALEN
);
976 brcmf_dbg(TRACE
, " ==== pid:%x, if:%s (%pM) created ===\n",
977 current
->pid
, name
, ifp
->mac_addr
);
982 void brcmf_del_if(struct brcmf_pub
*drvr
, s32 bssidx
)
984 struct brcmf_if
*ifp
;
986 ifp
= drvr
->iflist
[bssidx
];
987 drvr
->iflist
[bssidx
] = NULL
;
989 brcmf_err("Null interface, idx=%d\n", bssidx
);
992 brcmf_dbg(TRACE
, "Enter, idx=%d, ifidx=%d\n", bssidx
, ifp
->ifidx
);
995 if (ifp
->ndev
->netdev_ops
== &brcmf_netdev_ops_pri
) {
997 brcmf_netdev_stop(ifp
->ndev
);
1001 netif_stop_queue(ifp
->ndev
);
1004 if (ifp
->ndev
->netdev_ops
== &brcmf_netdev_ops_pri
) {
1005 cancel_work_sync(&ifp
->setmacaddr_work
);
1006 cancel_work_sync(&ifp
->multicast_work
);
1008 /* unregister will take care of freeing it */
1009 unregister_netdev(ifp
->ndev
);
1011 brcmf_cfg80211_detach(drvr
->config
);
1017 int brcmf_attach(struct device
*dev
)
1019 struct brcmf_pub
*drvr
= NULL
;
1022 brcmf_dbg(TRACE
, "Enter\n");
1024 /* Allocate primary brcmf_info */
1025 drvr
= kzalloc(sizeof(struct brcmf_pub
), GFP_ATOMIC
);
1029 mutex_init(&drvr
->proto_block
);
1031 /* Link to bus module */
1033 drvr
->bus_if
= dev_get_drvdata(dev
);
1034 drvr
->bus_if
->drvr
= drvr
;
1036 /* create device debugfs folder */
1037 brcmf_debugfs_attach(drvr
);
1039 /* Attach and link in the protocol */
1040 ret
= brcmf_proto_attach(drvr
);
1042 brcmf_err("brcmf_prot_attach failed\n");
1046 /* attach firmware event handler */
1047 brcmf_fweh_attach(drvr
);
1057 int brcmf_bus_start(struct device
*dev
)
1060 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1061 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1062 struct brcmf_if
*ifp
;
1063 struct brcmf_if
*p2p_ifp
;
1065 brcmf_dbg(TRACE
, "\n");
1067 /* Bring up the bus */
1068 ret
= brcmf_bus_init(bus_if
);
1070 brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret
);
1074 /* add primary networking interface */
1075 ifp
= brcmf_add_if(drvr
, 0, 0, "wlan%d", NULL
);
1077 return PTR_ERR(ifp
);
1079 if (brcmf_p2p_enable
)
1080 p2p_ifp
= brcmf_add_if(drvr
, 1, 0, "p2p%d", NULL
);
1083 if (IS_ERR(p2p_ifp
))
1086 /* signal bus ready */
1087 bus_if
->state
= BRCMF_BUS_DATA
;
1089 /* Bus is ready, do any initialization */
1090 ret
= brcmf_c_preinit_dcmds(ifp
);
1094 ret
= brcmf_fws_init(drvr
);
1098 brcmf_fws_add_interface(ifp
);
1100 drvr
->config
= brcmf_cfg80211_attach(drvr
, bus_if
->dev
);
1101 if (drvr
->config
== NULL
) {
1106 ret
= brcmf_fweh_activate_events(ifp
);
1110 ret
= brcmf_net_attach(ifp
, false);
1113 brcmf_err("failed: %d\n", ret
);
1115 brcmf_cfg80211_detach(drvr
->config
);
1117 brcmf_fws_del_interface(ifp
);
1118 brcmf_fws_deinit(drvr
);
1120 if (drvr
->iflist
[0]) {
1121 free_netdev(ifp
->ndev
);
1122 drvr
->iflist
[0] = NULL
;
1125 free_netdev(p2p_ifp
->ndev
);
1126 drvr
->iflist
[1] = NULL
;
1130 if ((brcmf_p2p_enable
) && (p2p_ifp
))
1131 if (brcmf_net_p2p_attach(p2p_ifp
) < 0)
1132 brcmf_p2p_enable
= 0;
1137 void brcmf_bus_add_txhdrlen(struct device
*dev
, uint len
)
1139 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1140 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1143 drvr
->hdrlen
+= len
;
1147 static void brcmf_bus_detach(struct brcmf_pub
*drvr
)
1149 brcmf_dbg(TRACE
, "Enter\n");
1152 /* Stop the bus module */
1153 brcmf_bus_stop(drvr
->bus_if
);
1157 void brcmf_dev_reset(struct device
*dev
)
1159 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1160 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1165 if (drvr
->iflist
[0])
1166 brcmf_fil_cmd_int_set(drvr
->iflist
[0], BRCMF_C_TERMINATED
, 1);
1169 void brcmf_detach(struct device
*dev
)
1172 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1173 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1175 brcmf_dbg(TRACE
, "Enter\n");
1180 /* stop firmware event handling */
1181 brcmf_fweh_detach(drvr
);
1183 /* make sure primary interface removed last */
1184 for (i
= BRCMF_MAX_IFS
-1; i
> -1; i
--)
1185 if (drvr
->iflist
[i
]) {
1186 brcmf_fws_del_interface(drvr
->iflist
[i
]);
1187 brcmf_del_if(drvr
, i
);
1190 brcmf_bus_detach(drvr
);
1193 brcmf_proto_detach(drvr
);
1195 brcmf_fws_deinit(drvr
);
1197 brcmf_debugfs_detach(drvr
);
1198 bus_if
->drvr
= NULL
;
1202 s32
brcmf_iovar_data_set(struct device
*dev
, char *name
, void *data
, u32 len
)
1204 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1205 struct brcmf_if
*ifp
= bus_if
->drvr
->iflist
[0];
1207 return brcmf_fil_iovar_data_set(ifp
, name
, data
, len
);
1210 static int brcmf_get_pend_8021x_cnt(struct brcmf_if
*ifp
)
1212 return atomic_read(&ifp
->pend_8021x_cnt
);
1215 int brcmf_netdev_wait_pend8021x(struct net_device
*ndev
)
1217 struct brcmf_if
*ifp
= netdev_priv(ndev
);
1220 err
= wait_event_timeout(ifp
->pend_8021x_wait
,
1221 !brcmf_get_pend_8021x_cnt(ifp
),
1222 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX
));
1230 * return chip id and rev of the device encoded in u32.
1232 u32
brcmf_get_chip_info(struct brcmf_if
*ifp
)
1234 struct brcmf_bus
*bus
= ifp
->drvr
->bus_if
;
1236 return bus
->chip
<< 4 | bus
->chiprev
;
/* Deferred registration of the bus drivers; run from a workqueue so
 * module init does not block on (potentially slow) bus probing.
 */
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1250 static int __init
brcmfmac_module_init(void)
1252 brcmf_debugfs_init();
1253 #ifdef CONFIG_BRCMFMAC_SDIO
1256 if (!schedule_work(&brcmf_driver_work
))
1262 static void __exit
brcmfmac_module_exit(void)
1264 cancel_work_sync(&brcmf_driver_work
);
1266 #ifdef CONFIG_BRCMFMAC_SDIO
1269 #ifdef CONFIG_BRCMFMAC_USB
1272 brcmf_debugfs_exit();
module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);