drivers/net/wireless/brcm80211/brcmfmac/core.c
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "bus.h"
#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "feature.h"
#include "proto.h"
#include "pcie.h"

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET		0
#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
#define BRCMF_RXREORDER_FLAGS_OFFSET		4
#define BRCMF_RXREORDER_CURIDX_OFFSET		6
#define BRCMF_RXREORDER_EXPIDX_OFFSET		8

#define BRCMF_RXREORDER_DEL_FLOW		0x01
#define BRCMF_RXREORDER_FLUSH_ALL		0x02
#define BRCMF_RXREORDER_CURIDX_VALID		0x04
#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
#define BRCMF_RXREORDER_NEW_HOLE		0x10

/* Debug message level */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif

char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
		brcmf_err("ifidx %d out of range\n", ifidx);
		return "<if_bad>";
	}

	if (drvr->iflist[ifidx] == NULL) {
		brcmf_err("null i/f %d\n", ifidx);
		return "<if_null>";
	}

	if (drvr->iflist[ifidx]->ndev)
		return drvr->iflist[ifidx]->ndev->name;

	return "<if_none>";
}

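/* Work handler that pushes the interface's Rx filter settings down to
 * firmware: the multicast address list (via the "mcast_list" iovar), the
 * allmulti flag and the promiscuous flag. Scheduled from
 * brcmf_netdev_set_multicast_list().
 */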
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/* Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}

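/* Work handler that programs ifp->mac_addr into the firmware via the
 * "cur_etheraddr" iovar and, on success, mirrors it into the net_device.
 * Scheduled from brcmf_netdev_set_mac_address().
 */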
static void
_brcmf_set_mac_address(struct work_struct *work)
{
	struct brcmf_if *ifp;
	s32 err;

	ifp = container_of(work, struct brcmf_if, setmacaddr_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
				       ETH_ALEN);
	if (err < 0) {
		brcmf_err("Setting cur_etheraddr failed, %d\n", err);
	} else {
		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
			  ifp->mac_addr);
		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
	}
}

static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct sockaddr *sa = (struct sockaddr *)addr;

	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
	schedule_work(&ifp->setmacaddr_work);
	return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}

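/* ndo_start_xmit handler. Drops the frame when the bus is down or the
 * interface entry is gone, grows headroom when the bus/protocol headers do
 * not fit, counts pending 802.1X (EAPOL) frames and hands the skb to the
 * firmware-signalling layer. Always returns NETDEV_TX_OK since the skb is
 * consumed either way.
 */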
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);

	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	if (!drvr->iflist[ifp->bssidx]) {
		brcmf_err("bad ifidx %d\n", ifp->bssidx);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Make sure there's enough room for any header */
	if (skb_headroom(skb) < drvr->hdrlen) {
		struct sk_buff *skb2;

		brcmf_dbg(INFO, "%s: insufficient headroom\n",
			  brcmf_ifname(drvr, ifp->bssidx));
		drvr->bus_if->tx_realloc++;
		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
		dev_kfree_skb(skb);
		skb = skb2;
		if (skb == NULL) {
			brcmf_err("%s: skb_realloc_headroom failed\n",
				  brcmf_ifname(drvr, ifp->bssidx));
			ret = -ENOMEM;
			goto done;
		}
	}

	/* validate length for ether packet */
	if (skb->len < sizeof(*eh)) {
		ret = -EINVAL;
		dev_kfree_skb(skb);
		goto done;
	}

	if (eh->h_proto == htons(ETH_P_PAE))
		atomic_inc(&ifp->pend_8021x_cnt);

	ret = brcmf_fws_process_skb(ifp, skb);

done:
	if (ret) {
		ifp->stats.tx_dropped++;
	} else {
		ifp->stats.tx_packets++;
		ifp->stats.tx_bytes += skb->len;
	}

	/* Return ok: we always eat the packet */
	return NETDEV_TX_OK;
}

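/* Stop or wake the netif queue of a single interface. 'reason' is a bit in
 * ifp->netif_stop so several independent flow-control sources can block the
 * queue; it is only woken again once all reasons have been cleared.
 */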
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_fws_bus_blocked(drvr, state);
}

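/* Deliver a received frame to the network stack. Event packets are first
 * passed to the firmware event handler; frames arriving while the interface
 * is down are dropped. Outside interrupt context netif_rx_ni() is used so
 * the NET_RX softirq gets serviced.
 */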
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}

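/* Walk the reorder slots from index 'start' until 'end' is reached (wrapping
 * at max_idx) and move any buffered packets onto 'skb_list' in sequence
 * order; when start equals end the whole ring is flushed. rfi->pend_pkts is
 * reduced by the number of packets collected.
 */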
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}

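/* Handle the AMPDU rx reorder metadata that firmware attaches to a received
 * packet (the BRCMF_RXREORDER_* offsets index into 'reorder_data'). Depending
 * on the flags the flow is deleted, a new reorder hole is opened, the packet
 * is slotted into the current hole, or the expected index is moved; any
 * packets that become in-order as a result are flushed to brcmf_netif_rx().
 */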
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi + 1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicit window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}

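/* Bus receive entry point. Strips the protocol header to learn the interface
 * index, drops the frame on error, and routes it either through the AMPDU
 * reorder logic (when the protocol layer left reorder data in skb->cb) or
 * straight to brcmf_netif_rx().
 */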
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_skb_reorder_data *rd;
	u8 ifidx;
	int ret;

	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

	/* process and remove protocol-specific header */
	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
	ifp = drvr->iflist[ifidx];

	if (ret || !ifp || !ifp->ndev) {
		if ((ret != -ENODATA) && ifp)
			ifp->stats.rx_errors++;
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder)
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	else
		brcmf_netif_rx(ifp, skb);
}

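/* Complete transmission of a single frame: account finished 802.1X frames so
 * brcmf_netdev_wait_pend8021x() can make progress, bump the error counter on
 * failure and free the packet.
 */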
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
		      bool success)
{
	struct brcmf_if *ifp;
	struct ethhdr *eh;
	u16 type;

	ifp = drvr->iflist[ifidx];
	if (!ifp)
		goto done;

	eh = (struct ethhdr *)(txp->data);
	type = ntohs(eh->h_proto);

	if (type == ETH_P_PAE) {
		atomic_dec(&ifp->pend_8021x_cnt);
		if (waitqueue_active(&ifp->pend_8021x_wait))
			wake_up(&ifp->pend_8021x_wait);
	}

	if (!success)
		ifp->stats.tx_errors++;
done:
	brcmu_pkt_buf_free_skb(txp);
}

void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	u8 ifidx;

	/* await txstatus signal for firmware if active */
	if (brcmf_fws_fc_active(drvr->fws)) {
		if (!success)
			brcmf_fws_bustxfail(drvr->fws, txp);
	} else {
		if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
			brcmu_pkt_buf_free_skb(txp);
		else
			brcmf_txfinalize(drvr, txp, ifidx, success);
	}
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	return &ifp->stats;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				      struct ethtool_drvinfo *info)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	char drev[BRCMU_DOTREV_LEN] = "n/a";

	if (drvr->revinfo.result == 0)
		brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, drev, sizeof(info->version));
	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};

static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}

static int brcmf_netdev_open(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_bus *bus_if = drvr->bus_if;
	u32 toe_ol;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("failed bus is not ready\n");
		return -EAGAIN;
	}

	atomic_set(&ifp->pend_8021x_cnt, 0);

	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	if (brcmf_cfg80211_up(ndev)) {
		brcmf_err("failed to bring up cfg80211\n");
		return -EIO;
	}

	/* Allow transmit calls */
	netif_start_queue(ndev);
	return 0;
}

static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};

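/* Finish setting up the net_device of a (non-P2P) interface and register it
 * with the network stack. 'rtnl_locked' selects register_netdevice() vs
 * register_netdev() depending on whether the caller already holds the RTNL
 * lock. On failure the half-initialised interface is torn down again.
 */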
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
		     drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	ndev->destructor = brcmf_cfg80211_free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	ndev->netdev_ops = &brcmf_netdev_ops_p2p;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	if (register_netdev(ndev) != 0) {
		brcmf_err("couldn't register the p2p net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	return 0;

fail:
	ifp->drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

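/* Allocate the driver state for a new virtual interface. A stale entry for
 * the same bsscfg index is removed first (a missed BRCMF_E_IF_DEL event).
 * The P2P device interface (bssidx 1 when the p2pon module parameter is not
 * set) gets a bare brcmf_if without a net_device; all other interfaces
 * allocate one with alloc_netdev().
 */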
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
				    ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}

static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
	} else {
		kfree(ifp);
	}
}

void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx)
{
	if (drvr->iflist[bssidx]) {
		brcmf_fws_del_interface(drvr->iflist[bssidx]);
		brcmf_del_if(drvr, bssidx);
	}
}

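/* Find a free bsscfg index for a new virtual interface. Indices 0 and 1 are
 * taken by the primary and P2P device interfaces, so candidates start at 2
 * and are bumped past any index already in use; -ENOMEM is returned when no
 * interface slot is free.
 */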
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
{
	int ifidx;
	int bsscfgidx;
	bool available;
	int highest;

	available = false;
	bsscfgidx = 2;
	highest = 2;
	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
		if (drvr->iflist[ifidx]) {
			if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
				bsscfgidx = highest + 1;
			else if (drvr->iflist[ifidx]->bssidx > highest)
				highest = drvr->iflist[ifidx]->bssidx;
		} else {
			available = true;
		}
	}

	return available ? bsscfgidx : -ENOMEM;
}

int brcmf_attach(struct device *dev)
{
	struct brcmf_pub *drvr = NULL;
	int ret = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate primary brcmf_info */
	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
	if (!drvr)
		return -ENOMEM;

	mutex_init(&drvr->proto_block);

	/* Link to bus module */
	drvr->hdrlen = 0;
	drvr->bus_if = dev_get_drvdata(dev);
	drvr->bus_if->drvr = drvr;

	/* create device debugfs folder */
	brcmf_debugfs_attach(drvr);

	/* Attach and link in the protocol */
	ret = brcmf_proto_attach(drvr);
	if (ret != 0) {
		brcmf_err("brcmf_prot_attach failed\n");
		goto fail;
	}

	/* attach firmware event handler */
	brcmf_fweh_attach(drvr);

	return ret;

fail:
	brcmf_detach(dev);

	return ret;
}

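/* Bus start-up path: creates the primary (and, with p2pon, the P2P)
 * interface, marks the bus UP, runs the initial dongle commands, attaches
 * feature/fwsignal/cfg80211 support and finally registers the primary
 * net_device. Everything is unwound again on failure.
 */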
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	/* assure we have chipid before feature attach */
	if (!bus_if->chip) {
		bus_if->chip = drvr->revinfo.chipnum;
		bus_if->chiprev = drvr->revinfo.chiprev;
		brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
			  bus_if->chip, bus_if->chip, bus_if->chiprev);
	}
	brcmf_feat_attach(drvr);

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}

void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr) {
		drvr->hdrlen += len;
	}
}

static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (drvr) {
		/* Stop the bus module */
		brcmf_bus_stop(drvr->bus_if);
	}
}

void brcmf_dev_reset(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr == NULL)
		return;

	if (drvr->iflist[0])
		brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}

void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS - 1; i > -1; i--)
		brcmf_remove_interface(drvr, i);

	brcmf_cfg80211_detach(drvr->config);

	brcmf_fws_deinit(drvr);

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}

s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_if *ifp = bus_if->drvr->iflist[0];

	return brcmf_fil_iovar_data_set(ifp, name, data, len);
}

static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}

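/* Wait (up to MAX_WAIT_FOR_8021X_TX msecs) for all pending 802.1X (EAPOL)
 * frames counted in pend_8021x_cnt to complete transmission. Returns 0 when
 * the count reached zero, non-zero on timeout.
 */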
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
{
	int err;

	err = wait_event_timeout(ifp->pend_8021x_wait,
				 !brcmf_get_pend_8021x_cnt(ifp),
				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

	WARN_ON(!err);

	return !err;
}

void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
{
	struct brcmf_pub *drvr = bus->drvr;
	struct net_device *ndev;
	int ifidx;

	brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
	bus->state = state;

	if (state == BRCMF_BUS_UP) {
		for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
			if ((drvr->iflist[ifidx]) &&
			    (drvr->iflist[ifidx]->ndev)) {
				ndev = drvr->iflist[ifidx]->ndev;
				if (netif_queue_stopped(ndev))
					netif_wake_queue(ndev);
			}
		}
	}
}

static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}

static void __exit brcmfmac_module_exit(void)
{
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_exit();
#endif
	brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);