2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/kernel.h>
18 #include <linux/etherdevice.h>
19 #include <linux/module.h>
20 #include <net/cfg80211.h>
21 #include <net/rtnetlink.h>
22 #include <brcmu_utils.h>
23 #include <brcmu_wifi.h>
28 #include "fwil_types.h"
30 #include "wl_cfg80211.h"
/* Module metadata, load-time parameters, and AMPDU RX-reorder constants.
 * The BRCMF_RXREORDER_*_OFFSET values are byte offsets into the firmware's
 * per-packet reorder metadata; the 0x01..0x10 values are its flag bits.
 * NOTE(review): this extraction is line-shredded — single statements span
 * multiple lines and some original lines are missing.
 */
37 MODULE_AUTHOR("Broadcom Corporation");
38 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
39 MODULE_LICENSE("Dual BSD/GPL");
41 #define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
43 /* AMPDU rx reordering definitions */
44 #define BRCMF_RXREORDER_FLOWID_OFFSET 0
45 #define BRCMF_RXREORDER_MAXIDX_OFFSET 2
46 #define BRCMF_RXREORDER_FLAGS_OFFSET 4
47 #define BRCMF_RXREORDER_CURIDX_OFFSET 6
48 #define BRCMF_RXREORDER_EXPIDX_OFFSET 8
50 #define BRCMF_RXREORDER_DEL_FLOW 0x01
51 #define BRCMF_RXREORDER_FLUSH_ALL 0x02
52 #define BRCMF_RXREORDER_CURIDX_VALID 0x04
53 #define BRCMF_RXREORDER_EXPIDX_VALID 0x08
54 #define BRCMF_RXREORDER_NEW_HOLE 0x10
/* "debug": debug message level, writable by root at runtime */
58 module_param_named(debug
, brcmf_msg_level
, int, S_IRUSR
| S_IWUSR
);
59 MODULE_PARM_DESC(debug
, "level of debug output");
/* "p2pon": enable P2P management interface at module load */
62 static int brcmf_p2p_enable
;
64 module_param_named(p2pon
, brcmf_p2p_enable
, int, 0);
65 MODULE_PARM_DESC(p2pon
, "enable p2p management functionality");
/* brcmf_ifname() - return the netdev name for interface index @ifidx.
 * Validates the index range and the drvr->iflist[] entry, logging errors
 * via brcmf_err() on failure.
 * NOTE(review): shredded extraction — the error-path return statements and
 * closing braces are not visible here; confirm against upstream brcmfmac.
 */
68 char *brcmf_ifname(struct brcmf_pub
*drvr
, int ifidx
)
70 if (ifidx
< 0 || ifidx
>= BRCMF_MAX_IFS
) {
71 brcmf_err("ifidx %d out of range\n", ifidx
);
75 if (drvr
->iflist
[ifidx
] == NULL
) {
76 brcmf_err("null i/f %d\n", ifidx
);
80 if (drvr
->iflist
[ifidx
]->ndev
)
81 return drvr
->iflist
[ifidx
]->ndev
->name
;
/* _brcmf_set_multicast_list() - workqueue handler (ifp->multicast_work).
 * Pushes the netdev multicast address list to the firmware ("mcast_list"
 * iovar: le32 count followed by cnt * ETH_ALEN addresses), then sets the
 * "allmulti" iovar and the BRCMF_C_SET_PROMISC firmware command based on
 * the net_device IFF_ALLMULTI / IFF_PROMISC flags.
 * NOTE(review): shredded extraction — ndev/buf/bufp assignments, kmalloc
 * failure check, kfree and returns are among the missing lines.
 */
86 static void _brcmf_set_multicast_list(struct work_struct
*work
)
89 struct net_device
*ndev
;
90 struct netdev_hw_addr
*ha
;
97 ifp
= container_of(work
, struct brcmf_if
, multicast_work
);
99 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
103 /* Determine initial value of allmulti flag */
104 cmd_value
= (ndev
->flags
& IFF_ALLMULTI
) ? true : false;
106 /* Send down the multicast list first. */
107 cnt
= netdev_mc_count(ndev
);
108 buflen
= sizeof(cnt
) + (cnt
* ETH_ALEN
);
109 buf
= kmalloc(buflen
, GFP_ATOMIC
);
114 cnt_le
= cpu_to_le32(cnt
);
115 memcpy(bufp
, &cnt_le
, sizeof(cnt_le
));
116 bufp
+= sizeof(cnt_le
);
118 netdev_for_each_mc_addr(ha
, ndev
) {
121 memcpy(bufp
, ha
->addr
, ETH_ALEN
);
126 err
= brcmf_fil_iovar_data_set(ifp
, "mcast_list", buf
, buflen
);
128 brcmf_err("Setting mcast_list failed, %d\n", err
);
/* if the list was rejected and non-empty, fall back to allmulti */
129 cmd_value
= cnt
? true : cmd_value
;
135 * Now send the allmulti setting. This is based on the setting in the
136 * net_device flags, but might be modified above to be turned on if we
137 * were trying to set some addresses and dongle rejected it...
139 err
= brcmf_fil_iovar_int_set(ifp
, "allmulti", cmd_value
);
141 brcmf_err("Setting allmulti failed, %d\n", err
);
143 /*Finally, pick up the PROMISC flag */
144 cmd_value
= (ndev
->flags
& IFF_PROMISC
) ? true : false;
145 err
= brcmf_fil_cmd_int_set(ifp
, BRCMF_C_SET_PROMISC
, cmd_value
);
147 brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
/* _brcmf_set_mac_address() - workqueue handler (ifp->setmacaddr_work).
 * Writes ifp->mac_addr to the firmware via the "cur_etheraddr" iovar and,
 * on success, copies it into the netdev's dev_addr.
 * NOTE(review): shredded extraction — the length argument of the iovar
 * call and the surrounding error-check braces are missing lines.
 */
152 _brcmf_set_mac_address(struct work_struct
*work
)
154 struct brcmf_if
*ifp
;
157 ifp
= container_of(work
, struct brcmf_if
, setmacaddr_work
);
159 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
161 err
= brcmf_fil_iovar_data_set(ifp
, "cur_etheraddr", ifp
->mac_addr
,
164 brcmf_err("Setting cur_etheraddr failed, %d\n", err
);
166 brcmf_dbg(TRACE
, "MAC address updated to %pM\n",
168 memcpy(ifp
->ndev
->dev_addr
, ifp
->mac_addr
, ETH_ALEN
);
/* brcmf_netdev_set_mac_address() - .ndo_set_mac_address callback.
 * Stores the requested address in ifp->mac_addr and defers the firmware
 * update to the setmacaddr_work workqueue item (cannot block here).
 */
172 static int brcmf_netdev_set_mac_address(struct net_device
*ndev
, void *addr
)
174 struct brcmf_if
*ifp
= netdev_priv(ndev
);
175 struct sockaddr
*sa
= (struct sockaddr
*)addr
;
177 memcpy(&ifp
->mac_addr
, sa
->sa_data
, ETH_ALEN
);
178 schedule_work(&ifp
->setmacaddr_work
);
/* brcmf_netdev_set_multicast_list() - .ndo_set_rx_mode callback.
 * Defers the multicast/promisc firmware update to multicast_work.
 */
182 static void brcmf_netdev_set_multicast_list(struct net_device
*ndev
)
184 struct brcmf_if
*ifp
= netdev_priv(ndev
);
186 schedule_work(&ifp
->multicast_work
);
/* brcmf_netdev_start_xmit() - .ndo_start_xmit callback for the primary
 * interface. Rejects packets while the bus is not in BRCMF_BUS_DATA,
 * re-allocates headroom for the bus/protocol header when needed, counts
 * pending 802.1X (EAPOL) frames, and hands the skb to the firmware
 * signalling layer via brcmf_fws_process_skb(). Updates tx statistics.
 * NOTE(review): shredded extraction — the "done:" label, return paths,
 * drop handling and skb2 swap lines are missing; confirm upstream.
 */
189 static netdev_tx_t
brcmf_netdev_start_xmit(struct sk_buff
*skb
,
190 struct net_device
*ndev
)
193 struct brcmf_if
*ifp
= netdev_priv(ndev
);
194 struct brcmf_pub
*drvr
= ifp
->drvr
;
195 struct ethhdr
*eh
= (struct ethhdr
*)(skb
->data
);
197 brcmf_dbg(DATA
, "Enter, idx=%d\n", ifp
->bssidx
);
199 /* Can the device send data? */
200 if (drvr
->bus_if
->state
!= BRCMF_BUS_DATA
) {
201 brcmf_err("xmit rejected state=%d\n", drvr
->bus_if
->state
);
202 netif_stop_queue(ndev
);
208 if (!drvr
->iflist
[ifp
->bssidx
]) {
209 brcmf_err("bad ifidx %d\n", ifp
->bssidx
);
210 netif_stop_queue(ndev
);
216 /* Make sure there's enough room for any header */
217 if (skb_headroom(skb
) < drvr
->hdrlen
) {
218 struct sk_buff
*skb2
;
220 brcmf_dbg(INFO
, "%s: insufficient headroom\n",
221 brcmf_ifname(drvr
, ifp
->bssidx
));
222 drvr
->bus_if
->tx_realloc
++;
223 skb2
= skb_realloc_headroom(skb
, drvr
->hdrlen
);
227 brcmf_err("%s: skb_realloc_headroom failed\n",
228 brcmf_ifname(drvr
, ifp
->bssidx
));
234 /* validate length for ether packet */
235 if (skb
->len
< sizeof(*eh
)) {
/* track outstanding EAPOL frames so key plumbing can wait for them */
241 if (eh
->h_proto
== htons(ETH_P_PAE
))
242 atomic_inc(&ifp
->pend_8021x_cnt
);
244 ret
= brcmf_fws_process_skb(ifp
, skb
);
248 ifp
->stats
.tx_dropped
++;
250 ifp
->stats
.tx_packets
++;
251 ifp
->stats
.tx_bytes
+= skb
->len
;
254 /* Return ok: we always eat the packet */
/* brcmf_txflowblock_if() - stop or wake an interface's TX queue for a
 * given flow-control @reason. ifp->netif_stop is a bitmask of active
 * stop reasons, protected by ifp->netif_stop_lock; the queue is stopped
 * when the first reason is set and woken when the last one clears.
 * NOTE(review): shredded extraction — the if/else between the set and
 * clear branches (driven by @state) is a missing line.
 */
258 void brcmf_txflowblock_if(struct brcmf_if
*ifp
,
259 enum brcmf_netif_stop_reason reason
, bool state
)
263 if (!ifp
|| !ifp
->ndev
)
266 brcmf_dbg(TRACE
, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
267 ifp
->bssidx
, ifp
->netif_stop
, reason
, state
);
269 spin_lock_irqsave(&ifp
->netif_stop_lock
, flags
);
271 if (!ifp
->netif_stop
)
272 netif_stop_queue(ifp
->ndev
);
273 ifp
->netif_stop
|= reason
;
275 ifp
->netif_stop
&= ~reason
;
276 if (!ifp
->netif_stop
)
277 netif_wake_queue(ifp
->ndev
);
279 spin_unlock_irqrestore(&ifp
->netif_stop_lock
, flags
);
/* brcmf_txflowblock() - bus-level flow control entry point; forwards the
 * blocked/unblocked @state to the firmware-signalling layer.
 */
282 void brcmf_txflowblock(struct device
*dev
, bool state
)
284 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
285 struct brcmf_pub
*drvr
= bus_if
->drvr
;
287 brcmf_dbg(TRACE
, "Enter\n");
289 brcmf_fws_bus_blocked(drvr
, state
);
/* brcmf_netif_rx() - deliver a received skb to the network stack.
 * Sets skb->dev/protocol, counts multicast, lets the firmware event
 * handler inspect event packets, frees the skb if the interface is down,
 * updates rx statistics, then (per the trailing comment) passes it up via
 * netif_rx / netif_rx_ni.
 * NOTE(review): shredded extraction — the return after the !IFF_UP free
 * and the actual netif_rx*() call are missing lines.
 */
292 void brcmf_netif_rx(struct brcmf_if
*ifp
, struct sk_buff
*skb
)
294 skb
->dev
= ifp
->ndev
;
295 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
297 if (skb
->pkt_type
== PACKET_MULTICAST
)
298 ifp
->stats
.multicast
++;
300 /* Process special event packets */
301 brcmf_fweh_process_skb(ifp
->drvr
, skb
);
303 if (!(ifp
->ndev
->flags
& IFF_UP
)) {
304 brcmu_pkt_buf_free_skb(skb
);
308 ifp
->stats
.rx_bytes
+= skb
->len
;
309 ifp
->stats
.rx_packets
++;
311 brcmf_dbg(DATA
, "rx proto=0x%X\n", ntohs(skb
->protocol
));
315 /* If the receive is not processed inside an ISR,
316 * the softirqd must be woken explicitly to service
317 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
/* brcmf_rxreorder_get_skb_list() - collect the buffered packets from the
 * reorder slot ring into @skb_list, walking from a start index to an end
 * index with wrap-around at rfi->max_idx, NULLing each drained slot and
 * decrementing rfi->pend_pkts by the number collected.
 * NOTE(review): shredded extraction — the start/end parameters, do-loop
 * head, and the wrap "start = 0" line are missing; confirm upstream.
 */
322 static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder
*rfi
,
324 struct sk_buff_head
*skb_list
)
326 /* initialize return list */
327 __skb_queue_head_init(skb_list
);
329 if (rfi
->pend_pkts
== 0) {
330 brcmf_dbg(INFO
, "no packets in reorder queue\n");
335 if (rfi
->pktslots
[start
]) {
336 __skb_queue_tail(skb_list
, rfi
->pktslots
[start
]);
337 rfi
->pktslots
[start
] = NULL
;
340 if (start
> rfi
->max_idx
)
342 } while (start
!= end
);
343 rfi
->pend_pkts
-= skb_queue_len(skb_list
);
/* brcmf_rxreorder_process_info() - AMPDU RX reordering state machine.
 * Interprets the firmware-supplied @reorder_data (flow id, flags and
 * cur/exp/max indices at the BRCMF_RXREORDER_*_OFFSET positions) for the
 * packet being received and either:
 *  - delivers the packet straight up (invalid flags, no flow, alloc fail);
 *  - tears down a flow (BRCMF_RXREORDER_DEL_FLOW), flushing its slots;
 *  - creates a new flow instance (kzalloc of rfi plus max_idx+1 slot
 *    pointers, stored in ifp->drvr->reorder_flows[flow_id]);
 *  - buffers the packet in its slot, or flushes in-order runs when the
 *    expected index advances (CURIDX_VALID / EXPIDX_VALID / FLUSH_ALL).
 * Every packet moved onto reorder_list is finally handed to
 * brcmf_netif_rx().
 * NOTE(review): shredded extraction — many control-flow lines (flag
 * validity test, else branches, gotos/returns, "netbuf_err"-style exits)
 * are missing; the visible lines match the upstream brcmfmac reorder
 * logic but the full structure must be confirmed against it.
 */
346 static void brcmf_rxreorder_process_info(struct brcmf_if
*ifp
, u8
*reorder_data
,
349 u8 flow_id
, max_idx
, cur_idx
, exp_idx
, end_idx
;
350 struct brcmf_ampdu_rx_reorder
*rfi
;
351 struct sk_buff_head reorder_list
;
352 struct sk_buff
*pnext
;
356 flow_id
= reorder_data
[BRCMF_RXREORDER_FLOWID_OFFSET
];
357 flags
= reorder_data
[BRCMF_RXREORDER_FLAGS_OFFSET
];
359 /* validate flags and flow id */
361 brcmf_err("invalid flags...so ignore this packet\n");
362 brcmf_netif_rx(ifp
, pkt
);
366 rfi
= ifp
->drvr
->reorder_flows
[flow_id
];
367 if (flags
& BRCMF_RXREORDER_DEL_FLOW
) {
368 brcmf_dbg(INFO
, "flow-%d: delete\n",
372 brcmf_dbg(INFO
, "received flags to cleanup, but no flow (%d) yet\n",
374 brcmf_netif_rx(ifp
, pkt
);
378 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
, rfi
->exp_idx
,
380 /* add the last packet */
381 __skb_queue_tail(&reorder_list
, pkt
);
383 ifp
->drvr
->reorder_flows
[flow_id
] = NULL
;
386 /* from here on we need a flow reorder instance */
388 buf_size
= sizeof(*rfi
);
389 max_idx
= reorder_data
[BRCMF_RXREORDER_MAXIDX_OFFSET
];
391 buf_size
+= (max_idx
+ 1) * sizeof(pkt
);
393 /* allocate space for flow reorder info */
394 brcmf_dbg(INFO
, "flow-%d: start, maxidx %d\n",
396 rfi
= kzalloc(buf_size
, GFP_ATOMIC
);
398 brcmf_err("failed to alloc buffer\n");
399 brcmf_netif_rx(ifp
, pkt
);
403 ifp
->drvr
->reorder_flows
[flow_id
] = rfi
;
/* slot array lives in the same allocation, right after the struct */
404 rfi
->pktslots
= (struct sk_buff
**)(rfi
+1);
405 rfi
->max_idx
= max_idx
;
407 if (flags
& BRCMF_RXREORDER_NEW_HOLE
) {
408 if (rfi
->pend_pkts
) {
409 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
,
412 WARN_ON(rfi
->pend_pkts
);
414 __skb_queue_head_init(&reorder_list
);
416 rfi
->cur_idx
= reorder_data
[BRCMF_RXREORDER_CURIDX_OFFSET
];
417 rfi
->exp_idx
= reorder_data
[BRCMF_RXREORDER_EXPIDX_OFFSET
];
418 rfi
->max_idx
= reorder_data
[BRCMF_RXREORDER_MAXIDX_OFFSET
];
419 rfi
->pktslots
[rfi
->cur_idx
] = pkt
;
421 brcmf_dbg(DATA
, "flow-%d: new hole %d (%d), pending %d\n",
422 flow_id
, rfi
->cur_idx
, rfi
->exp_idx
, rfi
->pend_pkts
);
423 } else if (flags
& BRCMF_RXREORDER_CURIDX_VALID
) {
424 cur_idx
= reorder_data
[BRCMF_RXREORDER_CURIDX_OFFSET
];
425 exp_idx
= reorder_data
[BRCMF_RXREORDER_EXPIDX_OFFSET
];
427 if ((exp_idx
== rfi
->exp_idx
) && (cur_idx
!= rfi
->exp_idx
)) {
428 /* still in the current hole */
429 /* enqueue the current on the buffer chain */
430 if (rfi
->pktslots
[cur_idx
] != NULL
) {
431 brcmf_dbg(INFO
, "HOLE: ERROR buffer pending..free it\n");
432 brcmu_pkt_buf_free_skb(rfi
->pktslots
[cur_idx
]);
433 rfi
->pktslots
[cur_idx
] = NULL
;
435 rfi
->pktslots
[cur_idx
] = pkt
;
437 rfi
->cur_idx
= cur_idx
;
438 brcmf_dbg(DATA
, "flow-%d: store pkt %d (%d), pending %d\n",
439 flow_id
, cur_idx
, exp_idx
, rfi
->pend_pkts
);
441 /* can return now as there is no reorder
446 if (rfi
->exp_idx
== cur_idx
) {
447 if (rfi
->pktslots
[cur_idx
] != NULL
) {
448 brcmf_dbg(INFO
, "error buffer pending..free it\n");
449 brcmu_pkt_buf_free_skb(rfi
->pktslots
[cur_idx
]);
450 rfi
->pktslots
[cur_idx
] = NULL
;
452 rfi
->pktslots
[cur_idx
] = pkt
;
455 /* got the expected one. flush from current to expected
456 * and update expected
458 brcmf_dbg(DATA
, "flow-%d: expected %d (%d), pending %d\n",
459 flow_id
, cur_idx
, exp_idx
, rfi
->pend_pkts
);
461 rfi
->cur_idx
= cur_idx
;
462 rfi
->exp_idx
= exp_idx
;
464 brcmf_rxreorder_get_skb_list(rfi
, cur_idx
, exp_idx
,
466 brcmf_dbg(DATA
, "flow-%d: freeing buffers %d, pending %d\n",
467 flow_id
, skb_queue_len(&reorder_list
),
472 brcmf_dbg(DATA
, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
473 flow_id
, flags
, rfi
->cur_idx
, rfi
->exp_idx
,
475 if (flags
& BRCMF_RXREORDER_FLUSH_ALL
)
476 end_idx
= rfi
->exp_idx
;
480 /* flush pkts first */
481 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
, end_idx
,
484 if (exp_idx
== ((cur_idx
+ 1) % (rfi
->max_idx
+ 1))) {
485 __skb_queue_tail(&reorder_list
, pkt
);
487 rfi
->pktslots
[cur_idx
] = pkt
;
490 rfi
->exp_idx
= exp_idx
;
491 rfi
->cur_idx
= cur_idx
;
494 /* explicity window move updating the expected index */
495 exp_idx
= reorder_data
[BRCMF_RXREORDER_EXPIDX_OFFSET
];
497 brcmf_dbg(DATA
, "flow-%d (0x%x): change expected: %d -> %d\n",
498 flow_id
, flags
, rfi
->exp_idx
, exp_idx
);
499 if (flags
& BRCMF_RXREORDER_FLUSH_ALL
)
500 end_idx
= rfi
->exp_idx
;
504 brcmf_rxreorder_get_skb_list(rfi
, rfi
->exp_idx
, end_idx
,
506 __skb_queue_tail(&reorder_list
, pkt
);
507 /* set the new expected idx */
508 rfi
->exp_idx
= exp_idx
;
/* deliver everything gathered above to the stack */
511 skb_queue_walk_safe(&reorder_list
, pkt
, pnext
) {
512 __skb_unlink(pkt
, &reorder_list
);
513 brcmf_netif_rx(ifp
, pkt
);
/* brcmf_rx_frame() - bus RX entry point. Strips the protocol header
 * (which also yields the interface index), drops frames for bad/missing
 * interfaces (counting rx_errors except for -ENODATA), then routes the
 * skb through AMPDU reordering when reorder metadata is present in
 * skb->cb, or straight to brcmf_netif_rx().
 * NOTE(review): shredded extraction — the rd->reorder presence check and
 * some returns are missing lines.
 */
517 void brcmf_rx_frame(struct device
*dev
, struct sk_buff
*skb
)
519 struct brcmf_if
*ifp
;
520 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
521 struct brcmf_pub
*drvr
= bus_if
->drvr
;
522 struct brcmf_skb_reorder_data
*rd
;
526 brcmf_dbg(DATA
, "Enter: %s: rxp=%p\n", dev_name(dev
), skb
);
528 /* process and remove protocol-specific header */
529 ret
= brcmf_proto_hdrpull(drvr
, true, &ifidx
, skb
);
530 ifp
= drvr
->iflist
[ifidx
];
532 if (ret
|| !ifp
|| !ifp
->ndev
) {
533 if ((ret
!= -ENODATA
) && ifp
)
534 ifp
->stats
.rx_errors
++;
535 brcmu_pkt_buf_free_skb(skb
);
539 rd
= (struct brcmf_skb_reorder_data
*)skb
->cb
;
541 brcmf_rxreorder_process_info(ifp
, rd
->reorder
, skb
);
543 brcmf_netif_rx(ifp
, skb
);
/* brcmf_txfinalize() - complete a transmitted packet. Decrements the
 * pending-802.1X counter (waking brcmf_netdev_wait_pend8021x waiters)
 * for EAPOL frames, bumps tx_errors on failure, and frees the skb.
 * NOTE(review): shredded extraction — the success check guarding the
 * tx_errors increment is a missing line.
 */
546 void brcmf_txfinalize(struct brcmf_pub
*drvr
, struct sk_buff
*txp
, u8 ifidx
,
549 struct brcmf_if
*ifp
;
553 ifp
= drvr
->iflist
[ifidx
];
557 eh
= (struct ethhdr
*)(txp
->data
);
558 type
= ntohs(eh
->h_proto
);
560 if (type
== ETH_P_PAE
) {
561 atomic_dec(&ifp
->pend_8021x_cnt
);
562 if (waitqueue_active(&ifp
->pend_8021x_wait
))
563 wake_up(&ifp
->pend_8021x_wait
);
567 ifp
->stats
.tx_errors
++;
569 brcmu_pkt_buf_free_skb(txp
);
/* brcmf_txcomplete() - bus TX-complete callback. When firmware
 * signalling flow control is active, a failed transmit is reported to
 * the fws layer instead; otherwise the protocol header is pulled and
 * the packet finalized via brcmf_txfinalize().
 */
572 void brcmf_txcomplete(struct device
*dev
, struct sk_buff
*txp
, bool success
)
574 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
575 struct brcmf_pub
*drvr
= bus_if
->drvr
;
578 /* await txstatus signal for firmware if active */
579 if (brcmf_fws_fc_active(drvr
->fws
)) {
581 brcmf_fws_bustxfail(drvr
->fws
, txp
);
583 if (brcmf_proto_hdrpull(drvr
, false, &ifidx
, txp
))
584 brcmu_pkt_buf_free_skb(txp
);
586 brcmf_txfinalize(drvr
, txp
, ifidx
, success
);
/* brcmf_netdev_get_stats() - .ndo_get_stats callback; returns the
 * per-interface stats (the return of &ifp->stats is a missing line in
 * this extraction).
 */
590 static struct net_device_stats
*brcmf_netdev_get_stats(struct net_device
*ndev
)
592 struct brcmf_if
*ifp
= netdev_priv(ndev
);
594 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
/* brcmf_ethtool_get_drvinfo() - ethtool .get_drvinfo callback; fills in
 * driver name (module name), firmware version from drvr->fwver, and the
 * bus device name.
 */
599 static void brcmf_ethtool_get_drvinfo(struct net_device
*ndev
,
600 struct ethtool_drvinfo
*info
)
602 struct brcmf_if
*ifp
= netdev_priv(ndev
);
603 struct brcmf_pub
*drvr
= ifp
->drvr
;
605 strlcpy(info
->driver
, KBUILD_MODNAME
, sizeof(info
->driver
));
606 snprintf(info
->version
, sizeof(info
->version
), "n/a");
607 strlcpy(info
->fw_version
, drvr
->fwver
, sizeof(info
->fw_version
));
608 strlcpy(info
->bus_info
, dev_name(drvr
->bus_if
->dev
),
609 sizeof(info
->bus_info
));
/* ethtool operations table for brcmfmac net devices */
612 static const struct ethtool_ops brcmf_ethtool_ops
= {
613 .get_drvinfo
= brcmf_ethtool_get_drvinfo
,
/* brcmf_netdev_stop() - .ndo_stop callback; brings down cfg80211 state
 * and stops the TX queue.
 */
616 static int brcmf_netdev_stop(struct net_device
*ndev
)
618 struct brcmf_if
*ifp
= netdev_priv(ndev
);
620 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
622 brcmf_cfg80211_down(ndev
);
624 /* Set state and stop OS transmissions */
625 netif_stop_queue(ndev
);
/* brcmf_netdev_open() - .ndo_open callback. Requires the bus to be in
 * BRCMF_BUS_DATA, resets the pending-802.1X counter, enables/disables
 * NETIF_F_IP_CSUM based on the dongle's TOE TX checksum offload
 * ("toe_ol" iovar), brings up cfg80211, and starts the TX queue.
 * NOTE(review): shredded extraction — error returns and the else branch
 * marker for the csum clear are missing lines.
 */
630 static int brcmf_netdev_open(struct net_device
*ndev
)
632 struct brcmf_if
*ifp
= netdev_priv(ndev
);
633 struct brcmf_pub
*drvr
= ifp
->drvr
;
634 struct brcmf_bus
*bus_if
= drvr
->bus_if
;
637 brcmf_dbg(TRACE
, "Enter, idx=%d\n", ifp
->bssidx
);
639 /* If bus is not ready, can't continue */
640 if (bus_if
->state
!= BRCMF_BUS_DATA
) {
641 brcmf_err("failed bus is not ready\n");
645 atomic_set(&ifp
->pend_8021x_cnt
, 0);
647 /* Get current TOE mode from dongle */
648 if (brcmf_fil_iovar_int_get(ifp
, "toe_ol", &toe_ol
) >= 0
649 && (toe_ol
& TOE_TX_CSUM_OL
) != 0)
650 ndev
->features
|= NETIF_F_IP_CSUM
;
652 ndev
->features
&= ~NETIF_F_IP_CSUM
;
654 if (brcmf_cfg80211_up(ndev
)) {
655 brcmf_err("failed to bring up cfg80211\n");
659 /* Allow transmit calls */
660 netif_start_queue(ndev
);
/* net_device_ops for the primary (fullmac) interface */
664 static const struct net_device_ops brcmf_netdev_ops_pri
= {
665 .ndo_open
= brcmf_netdev_open
,
666 .ndo_stop
= brcmf_netdev_stop
,
667 .ndo_get_stats
= brcmf_netdev_get_stats
,
668 .ndo_start_xmit
= brcmf_netdev_start_xmit
,
669 .ndo_set_mac_address
= brcmf_netdev_set_mac_address
,
670 .ndo_set_rx_mode
= brcmf_netdev_set_multicast_list
/* brcmf_net_attach() - finish setting up and register ifp's net_device.
 * Installs brcmf_netdev_ops_pri and the ethtool ops, grows the header
 * length by the bus/protocol header size, sets drvr->rxsz and the MAC
 * address, initializes the setmacaddr/multicast work items, then
 * registers the netdev (register_netdevice() when @rtnl_locked, plain
 * register_netdev() otherwise). On failure the iflist slot is cleared.
 * NOTE(review): shredded extraction — the rtnl_locked branch, success
 * return and fail label lines are missing.
 */
673 int brcmf_net_attach(struct brcmf_if
*ifp
, bool rtnl_locked
)
675 struct brcmf_pub
*drvr
= ifp
->drvr
;
676 struct net_device
*ndev
;
679 brcmf_dbg(TRACE
, "Enter, idx=%d mac=%pM\n", ifp
->bssidx
,
683 /* set appropriate operations */
684 ndev
->netdev_ops
= &brcmf_netdev_ops_pri
;
686 ndev
->hard_header_len
+= drvr
->hdrlen
;
687 ndev
->ethtool_ops
= &brcmf_ethtool_ops
;
689 drvr
->rxsz
= ndev
->mtu
+ ndev
->hard_header_len
+
692 /* set the mac address */
693 memcpy(ndev
->dev_addr
, ifp
->mac_addr
, ETH_ALEN
);
695 INIT_WORK(&ifp
->setmacaddr_work
, _brcmf_set_mac_address
);
696 INIT_WORK(&ifp
->multicast_work
, _brcmf_set_multicast_list
);
699 err
= register_netdevice(ndev
);
701 err
= register_netdev(ndev
);
703 brcmf_err("couldn't register the net device\n");
707 brcmf_dbg(INFO
, "%s: Broadcom Dongle Host Driver\n", ndev
->name
);
709 ndev
->destructor
= brcmf_cfg80211_free_netdev
;
/* failure path: drop the interface slot and detach the ops */
713 drvr
->iflist
[ifp
->bssidx
] = NULL
;
714 ndev
->netdev_ops
= NULL
;
/* brcmf_net_p2p_open() - .ndo_open for the P2P device; delegates to
 * cfg80211 up.
 */
719 static int brcmf_net_p2p_open(struct net_device
*ndev
)
721 brcmf_dbg(TRACE
, "Enter\n");
723 return brcmf_cfg80211_up(ndev
);
/* brcmf_net_p2p_stop() - .ndo_stop for the P2P device; delegates to
 * cfg80211 down.
 */
726 static int brcmf_net_p2p_stop(struct net_device
*ndev
)
728 brcmf_dbg(TRACE
, "Enter\n");
730 return brcmf_cfg80211_down(ndev
);
/* brcmf_net_p2p_start_xmit() - .ndo_start_xmit for the P2P management
 * device: data TX is not supported, so every skb is simply freed.
 */
733 static netdev_tx_t
brcmf_net_p2p_start_xmit(struct sk_buff
*skb
,
734 struct net_device
*ndev
)
737 dev_kfree_skb_any(skb
);
/* net_device_ops for the P2P management interface */
742 static const struct net_device_ops brcmf_netdev_ops_p2p
= {
743 .ndo_open
= brcmf_net_p2p_open
,
744 .ndo_stop
= brcmf_net_p2p_stop
,
745 .ndo_start_xmit
= brcmf_net_p2p_start_xmit
/* brcmf_net_p2p_attach() - register the P2P management net_device with
 * brcmf_netdev_ops_p2p and the interface MAC address. On registration
 * failure the iflist slot and ops pointer are cleared.
 * NOTE(review): shredded extraction — success return and fail label are
 * missing lines.
 */
748 static int brcmf_net_p2p_attach(struct brcmf_if
*ifp
)
750 struct net_device
*ndev
;
752 brcmf_dbg(TRACE
, "Enter, idx=%d mac=%pM\n", ifp
->bssidx
,
756 ndev
->netdev_ops
= &brcmf_netdev_ops_p2p
;
758 /* set the mac address */
759 memcpy(ndev
->dev_addr
, ifp
->mac_addr
, ETH_ALEN
);
761 if (register_netdev(ndev
) != 0) {
762 brcmf_err("couldn't register the p2p net device\n");
766 brcmf_dbg(INFO
, "%s: Broadcom Dongle Host Driver\n", ndev
->name
);
/* failure path */
771 ifp
->drvr
->iflist
[ifp
->bssidx
] = NULL
;
772 ndev
->netdev_ops
= NULL
;
/* brcmf_add_if() - create the brcmf_if for (bssidx, ifidx). If an entry
 * already exists (missed BRCMF_E_IF_DEL), the old netdev is stopped,
 * unregistered and freed first. For bssidx 1 without p2pon a bare
 * (non-netdev) brcmf_if is kzalloc'd for the P2P_DEVICE; otherwise a
 * netdev is allocated with the brcmf_if as its private area. The new ifp
 * is linked into drvr->iflist and its wait queue / stop lock initialized;
 * @mac_addr (if given) seeds ifp->mac_addr. Returns the ifp or ERR_PTR.
 * NOTE(review): shredded extraction — the "existing ifp" guard, alloc
 * NULL checks, ndev/ifidx assignments and final return are missing lines.
 */
777 struct brcmf_if
*brcmf_add_if(struct brcmf_pub
*drvr
, s32 bssidx
, s32 ifidx
,
778 char *name
, u8
*mac_addr
)
780 struct brcmf_if
*ifp
;
781 struct net_device
*ndev
;
783 brcmf_dbg(TRACE
, "Enter, idx=%d, ifidx=%d\n", bssidx
, ifidx
);
785 ifp
= drvr
->iflist
[bssidx
];
787 * Delete the existing interface before overwriting it
788 * in case we missed the BRCMF_E_IF_DEL event.
791 brcmf_err("ERROR: netdev:%s already exists\n",
794 netif_stop_queue(ifp
->ndev
);
795 unregister_netdev(ifp
->ndev
);
796 free_netdev(ifp
->ndev
);
797 drvr
->iflist
[bssidx
] = NULL
;
799 brcmf_err("ignore IF event\n");
800 return ERR_PTR(-EINVAL
);
804 if (!brcmf_p2p_enable
&& bssidx
== 1) {
805 /* this is P2P_DEVICE interface */
806 brcmf_dbg(INFO
, "allocate non-netdev interface\n");
807 ifp
= kzalloc(sizeof(*ifp
), GFP_KERNEL
);
809 return ERR_PTR(-ENOMEM
);
811 brcmf_dbg(INFO
, "allocate netdev interface\n");
812 /* Allocate netdev, including space for private structure */
813 ndev
= alloc_netdev(sizeof(*ifp
), name
, ether_setup
);
815 return ERR_PTR(-ENOMEM
);
817 ifp
= netdev_priv(ndev
);
822 drvr
->iflist
[bssidx
] = ifp
;
824 ifp
->bssidx
= bssidx
;
826 init_waitqueue_head(&ifp
->pend_8021x_wait
);
827 spin_lock_init(&ifp
->netif_stop_lock
);
829 if (mac_addr
!= NULL
)
830 memcpy(ifp
->mac_addr
, mac_addr
, ETH_ALEN
);
832 brcmf_dbg(TRACE
, " ==== pid:%x, if:%s (%pM) created ===\n",
833 current
->pid
, name
, ifp
->mac_addr
);
/* brcmf_del_if() - tear down the interface at @bssidx. Clears the
 * iflist slot first, stops the netdev (via brcmf_netdev_stop for the
 * primary ops, plain queue stop otherwise), cancels the MAC-address and
 * multicast work items, and unregisters the netdev (which frees it via
 * its destructor).
 * NOTE(review): shredded extraction — the null-ifp return, the ndev
 * presence check and the non-netdev kfree branch are missing lines.
 */
838 void brcmf_del_if(struct brcmf_pub
*drvr
, s32 bssidx
)
840 struct brcmf_if
*ifp
;
842 ifp
= drvr
->iflist
[bssidx
];
843 drvr
->iflist
[bssidx
] = NULL
;
845 brcmf_err("Null interface, idx=%d\n", bssidx
);
848 brcmf_dbg(TRACE
, "Enter, idx=%d, ifidx=%d\n", bssidx
, ifp
->ifidx
);
851 if (ifp
->ndev
->netdev_ops
== &brcmf_netdev_ops_pri
) {
853 brcmf_netdev_stop(ifp
->ndev
);
857 netif_stop_queue(ifp
->ndev
);
860 if (ifp
->ndev
->netdev_ops
== &brcmf_netdev_ops_pri
) {
861 cancel_work_sync(&ifp
->setmacaddr_work
);
862 cancel_work_sync(&ifp
->multicast_work
);
864 /* unregister will take care of freeing it */
865 unregister_netdev(ifp
->ndev
);
/* brcmf_attach() - allocate and initialize the per-device brcmf_pub:
 * links it to the bus (dev_get_drvdata), creates the debugfs folder,
 * attaches the protocol layer and the firmware event handler.
 * NOTE(review): shredded extraction — alloc failure check, success
 * return and the fail/cleanup path are missing lines.
 */
871 int brcmf_attach(struct device
*dev
)
873 struct brcmf_pub
*drvr
= NULL
;
876 brcmf_dbg(TRACE
, "Enter\n");
878 /* Allocate primary brcmf_info */
879 drvr
= kzalloc(sizeof(struct brcmf_pub
), GFP_ATOMIC
);
883 mutex_init(&drvr
->proto_block
);
885 /* Link to bus module */
887 drvr
->bus_if
= dev_get_drvdata(dev
);
888 drvr
->bus_if
->drvr
= drvr
;
890 /* create device debugfs folder */
891 brcmf_debugfs_attach(drvr
);
893 /* Attach and link in the protocol */
894 ret
= brcmf_proto_attach(drvr
);
896 brcmf_err("brcmf_prot_attach failed\n");
900 /* attach firmware event handler */
901 brcmf_fweh_attach(drvr
);
/* brcmf_bus_start() - bring up the data path once the bus is ready:
 * creates the primary "wlan%d" interface (and "p2p%d" when p2pon is
 * set), marks the bus BRCMF_BUS_DATA, runs preinit firmware commands,
 * probes features, initializes firmware signalling, attaches cfg80211,
 * activates firmware events and registers the primary netdev. The tail
 * is the unwind path: detach cfg80211/fws and free any allocated
 * netdevs on failure; finally the p2p netdev is attached (disabling
 * p2pon if that fails).
 * NOTE(review): shredded extraction — error-check ifs, gotos/labels and
 * returns are missing lines throughout.
 */
911 int brcmf_bus_start(struct device
*dev
)
914 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
915 struct brcmf_pub
*drvr
= bus_if
->drvr
;
916 struct brcmf_if
*ifp
;
917 struct brcmf_if
*p2p_ifp
;
919 brcmf_dbg(TRACE
, "\n");
921 /* add primary networking interface */
922 ifp
= brcmf_add_if(drvr
, 0, 0, "wlan%d", NULL
);
926 if (brcmf_p2p_enable
)
927 p2p_ifp
= brcmf_add_if(drvr
, 1, 0, "p2p%d", NULL
);
933 /* signal bus ready */
934 brcmf_bus_change_state(bus_if
, BRCMF_BUS_DATA
);
936 /* Bus is ready, do any initialization */
937 ret
= brcmf_c_preinit_dcmds(ifp
);
941 brcmf_feat_attach(drvr
);
943 ret
= brcmf_fws_init(drvr
);
947 brcmf_fws_add_interface(ifp
);
949 drvr
->config
= brcmf_cfg80211_attach(drvr
, bus_if
->dev
);
950 if (drvr
->config
== NULL
) {
955 ret
= brcmf_fweh_activate_events(ifp
);
959 ret
= brcmf_net_attach(ifp
, false);
/* unwind path */
962 brcmf_err("failed: %d\n", ret
);
963 brcmf_cfg80211_detach(drvr
->config
);
965 brcmf_fws_del_interface(ifp
);
966 brcmf_fws_deinit(drvr
);
968 if (drvr
->iflist
[0]) {
969 free_netdev(ifp
->ndev
);
970 drvr
->iflist
[0] = NULL
;
973 free_netdev(p2p_ifp
->ndev
);
974 drvr
->iflist
[1] = NULL
;
978 if ((brcmf_p2p_enable
) && (p2p_ifp
))
979 if (brcmf_net_p2p_attach(p2p_ifp
) < 0)
980 brcmf_p2p_enable
= 0;
/* brcmf_bus_add_txhdrlen() - bus callback to grow the TX header length
 * requirement (the drvr->hdrlen update itself is a missing line in this
 * extraction).
 */
985 void brcmf_bus_add_txhdrlen(struct device
*dev
, uint len
)
987 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
988 struct brcmf_pub
*drvr
= bus_if
->drvr
;
/* brcmf_bus_detach() - stop the underlying bus module for @drvr. */
995 static void brcmf_bus_detach(struct brcmf_pub
*drvr
)
997 brcmf_dbg(TRACE
, "Enter\n");
1000 /* Stop the bus module */
1001 brcmf_bus_stop(drvr
->bus_if
);
/* brcmf_dev_reset() - notify the firmware of an imminent reset by
 * issuing BRCMF_C_TERMINATED on the primary interface (if present).
 */
1005 void brcmf_dev_reset(struct device
*dev
)
1007 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1008 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1013 if (drvr
->iflist
[0])
1014 brcmf_fil_cmd_int_set(drvr
->iflist
[0], BRCMF_C_TERMINATED
, 1);
/* brcmf_detach() - full teardown: stop firmware event handling, mark the
 * bus down, remove interfaces in reverse index order (primary last),
 * then detach cfg80211, firmware signalling, the bus, the protocol layer
 * and debugfs, finally freeing drvr (the kfree is a missing line) and
 * clearing bus_if->drvr.
 */
1017 void brcmf_detach(struct device
*dev
)
1020 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1021 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1023 brcmf_dbg(TRACE
, "Enter\n");
1028 /* stop firmware event handling */
1029 brcmf_fweh_detach(drvr
);
1031 brcmf_bus_change_state(bus_if
, BRCMF_BUS_DOWN
);
1033 /* make sure primary interface removed last */
1034 for (i
= BRCMF_MAX_IFS
-1; i
> -1; i
--)
1035 if (drvr
->iflist
[i
]) {
1036 brcmf_fws_del_interface(drvr
->iflist
[i
]);
1037 brcmf_del_if(drvr
, i
);
1040 brcmf_cfg80211_detach(drvr
->config
);
1042 brcmf_fws_deinit(drvr
);
1044 brcmf_bus_detach(drvr
);
1046 brcmf_proto_detach(drvr
);
1048 brcmf_debugfs_detach(drvr
);
1049 bus_if
->drvr
= NULL
;
/* brcmf_iovar_data_set() - convenience wrapper for bus modules: set a
 * firmware iovar through the primary interface (iflist[0]).
 */
1053 s32
brcmf_iovar_data_set(struct device
*dev
, char *name
, void *data
, u32 len
)
1055 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1056 struct brcmf_if
*ifp
= bus_if
->drvr
->iflist
[0];
1058 return brcmf_fil_iovar_data_set(ifp
, name
, data
, len
);
/* brcmf_get_pend_8021x_cnt() - number of EAPOL frames still in flight. */
1061 static int brcmf_get_pend_8021x_cnt(struct brcmf_if
*ifp
)
1063 return atomic_read(&ifp
->pend_8021x_cnt
);
/* brcmf_netdev_wait_pend8021x() - block (up to MAX_WAIT_FOR_8021X_TX ms)
 * until all pending 802.1X frames have completed transmission; waiters
 * are woken from brcmf_txfinalize().
 */
1066 int brcmf_netdev_wait_pend8021x(struct net_device
*ndev
)
1068 struct brcmf_if
*ifp
= netdev_priv(ndev
);
1071 err
= wait_event_timeout(ifp
->pend_8021x_wait
,
1072 !brcmf_get_pend_8021x_cnt(ifp
),
1073 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX
));
/* Module init/exit machinery. Bus driver registration (SDIO/USB/PCIe,
 * each behind its CONFIG_BRCMFMAC_* option) is deferred to a workqueue
 * item so module load does not block on device probing; module exit
 * cancels that work, unregisters the bus drivers and tears down debugfs.
 */
1080 static void brcmf_driver_register(struct work_struct
*work
)
1082 #ifdef CONFIG_BRCMFMAC_SDIO
1083 brcmf_sdio_register();
1085 #ifdef CONFIG_BRCMFMAC_USB
1086 brcmf_usb_register();
1088 #ifdef CONFIG_BRCMFMAC_PCIE
1089 brcmf_pcie_register();
1092 static DECLARE_WORK(brcmf_driver_work
, brcmf_driver_register
);
1094 static int __init
brcmfmac_module_init(void)
1096 brcmf_debugfs_init();
1097 #ifdef CONFIG_BRCMFMAC_SDIO
1100 if (!schedule_work(&brcmf_driver_work
))
1106 static void __exit
brcmfmac_module_exit(void)
1108 cancel_work_sync(&brcmf_driver_work
);
1110 #ifdef CONFIG_BRCMFMAC_SDIO
1113 #ifdef CONFIG_BRCMFMAC_USB
1116 #ifdef CONFIG_BRCMFMAC_PCIE
1119 brcmf_debugfs_exit();
1122 module_init(brcmfmac_module_init
);
1123 module_exit(brcmfmac_module_exit
);