brcmfmac: remove unnecessary EXPORT_SYMBOL() usage
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "fwil_types.h"
#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "proto.h"

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET	0
#define BRCMF_RXREORDER_MAXIDX_OFFSET	2
#define BRCMF_RXREORDER_FLAGS_OFFSET	4
#define BRCMF_RXREORDER_CURIDX_OFFSET	6
#define BRCMF_RXREORDER_EXPIDX_OFFSET	8

#define BRCMF_RXREORDER_DEL_FLOW	0x01
#define BRCMF_RXREORDER_FLUSH_ALL	0x02
#define BRCMF_RXREORDER_CURIDX_VALID	0x04
#define BRCMF_RXREORDER_EXPIDX_VALID	0x08
#define BRCMF_RXREORDER_NEW_HOLE	0x10

/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
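/* brcmf_ifname() - return the net_device name for the given interface index,
 * or a descriptive placeholder string when the index is out of range or no
 * interface (or net_device) exists at that slot.
 */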
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
		brcmf_err("ifidx %d out of range\n", ifidx);
		return "<if_bad>";
	}

	if (drvr->iflist[ifidx] == NULL) {
		brcmf_err("null i/f %d\n", ifidx);
		return "<if_null>";
	}

	if (drvr->iflist[ifidx]->ndev)
		return drvr->iflist[ifidx]->ndev->name;

	return "<if_none>";
}
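/* _brcmf_set_multicast_list() - worker that pushes the current multicast
 * address list, allmulti and promiscuous settings from the net_device down
 * to the firmware.
 */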
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/* Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}

static void
_brcmf_set_mac_address(struct work_struct *work)
{
	struct brcmf_if *ifp;
	s32 err;

	ifp = container_of(work, struct brcmf_if, setmacaddr_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
				       ETH_ALEN);
	if (err < 0) {
		brcmf_err("Setting cur_etheraddr failed, %d\n", err);
	} else {
		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
			  ifp->mac_addr);
		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
	}
}

static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct sockaddr *sa = (struct sockaddr *)addr;

	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
	schedule_work(&ifp->setmacaddr_work);
	return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}
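/* brcmf_netdev_start_xmit() - netdev transmit handler: validates bus state
 * and skb headroom, then hands the packet to the firmware-signalling layer.
 * The skb is always consumed, so NETDEV_TX_OK is returned even on error.
 */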
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh;

	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_DATA) {
		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	if (!drvr->iflist[ifp->bssidx]) {
		brcmf_err("bad ifidx %d\n", ifp->bssidx);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Make sure there's enough room for any header */
	if (skb_headroom(skb) < drvr->hdrlen) {
		struct sk_buff *skb2;

		brcmf_dbg(INFO, "%s: insufficient headroom\n",
			  brcmf_ifname(drvr, ifp->bssidx));
		drvr->bus_if->tx_realloc++;
		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
		dev_kfree_skb(skb);
		skb = skb2;
		if (skb == NULL) {
			brcmf_err("%s: skb_realloc_headroom failed\n",
				  brcmf_ifname(drvr, ifp->bssidx));
			ret = -ENOMEM;
			goto done;
		}
	}

	/* validate length for ether packet */
	if (skb->len < sizeof(*eh)) {
		ret = -EINVAL;
		dev_kfree_skb(skb);
		goto done;
	}

	ret = brcmf_fws_process_skb(ifp, skb);

done:
	if (ret) {
		ifp->stats.tx_dropped++;
	} else {
		ifp->stats.tx_packets++;
		ifp->stats.tx_bytes += skb->len;
	}

	/* Return ok: we always eat the packet */
	return NETDEV_TX_OK;
}
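/* brcmf_txflowblock_if() - stop or wake the netif queue of one interface for
 * the given reason; the queue is only woken again once no stop reasons
 * remain set.
 */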
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_fws_bus_blocked(drvr, state);
}

static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}
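/* brcmf_rxreorder_get_skb_list() - walk the packet slots from 'start' towards
 * 'end' (wrapping at max_idx), move any queued skbs onto skb_list and update
 * the pending count; calling it with start == end drains the whole ring.
 */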
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}
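/* brcmf_rxreorder_process_info() - handle the AMPDU rx reorder metadata that
 * firmware attached to a received packet: create or delete the per-flow
 * reorder state, buffer out-of-order packets in their slots, and release
 * in-order packets to the network stack.
 */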
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi+1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicit window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}
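/* brcmf_rx_frame() - bus receive entry point: strip the protocol header, look
 * up the receiving interface and either run the packet through AMPDU rx
 * reordering or pass it straight to the network stack.
 */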
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_skb_reorder_data *rd;
	u8 ifidx;
	int ret;

	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

	/* process and remove protocol-specific header */
	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
	ifp = drvr->iflist[ifidx];

	if (ret || !ifp || !ifp->ndev) {
		if ((ret != -ENODATA) && ifp)
			ifp->stats.rx_errors++;
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder)
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	else
		brcmf_netif_rx(ifp, skb);
}
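/* brcmf_txfinalize() - complete a transmitted packet: strip the remaining
 * protocol header, account pending 802.1X (EAPOL) frames, update tx error
 * statistics and free the skb.
 */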
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
		      bool success)
{
	struct brcmf_if *ifp;
	struct ethhdr *eh;
	u8 ifidx;
	u16 type;
	int res;

	res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);

	ifp = drvr->iflist[ifidx];
	if (!ifp)
		goto done;

	if (res == 0) {
		eh = (struct ethhdr *)(txp->data);
		type = ntohs(eh->h_proto);

		if (type == ETH_P_PAE) {
			atomic_dec(&ifp->pend_8021x_cnt);
			if (waitqueue_active(&ifp->pend_8021x_wait))
				wake_up(&ifp->pend_8021x_wait);
		}
	}
	if (!success)
		ifp->stats.tx_errors++;
done:
	brcmu_pkt_buf_free_skb(txp);
}

void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	/* await txstatus signal for firmware if active */
	if (brcmf_fws_fc_active(drvr->fws)) {
		if (!success)
			brcmf_fws_bustxfail(drvr->fws, txp);
	} else {
		brcmf_txfinalize(drvr, txp, success);
	}
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	return &ifp->stats;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				      struct ethtool_drvinfo *info)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	snprintf(info->version, sizeof(info->version), "n/a");
	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};

static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}

static int brcmf_netdev_open(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_bus *bus_if = drvr->bus_if;
	u32 toe_ol;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_DATA) {
		brcmf_err("failed bus is not ready\n");
		return -EAGAIN;
	}

	atomic_set(&ifp->pend_8021x_cnt, 0);

	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	if (brcmf_cfg80211_up(ndev)) {
		brcmf_err("failed to bring up cfg80211\n");
		return -EIO;
	}

	/* Allow transmit calls */
	netif_start_queue(ndev);
	return 0;
}

static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
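/* brcmf_net_attach() - finish setup of an interface and register its
 * net_device; rtnl_locked selects register_netdevice() vs register_netdev().
 * On failure the net_device is freed and the iflist slot is cleared.
 */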
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
		     drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	ndev->destructor = free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	ndev->netdev_ops = &brcmf_netdev_ops_p2p;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	if (register_netdev(ndev) != 0) {
		brcmf_err("couldn't register the p2p net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	return 0;

fail:
	ifp->drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}
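/* brcmf_add_if() - allocate and register driver state for an interface at the
 * given bss/interface index. A stale entry for the same index is torn down
 * first; for the P2P_DEVICE case a bare brcmf_if without a net_device is
 * allocated.
 */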
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}
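/* brcmf_del_if() - tear down the interface at the given bss index: stop its
 * queue, cancel pending work, unregister the net_device (or free the bare
 * brcmf_if when there is none) and detach cfg80211 for the primary interface.
 */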
void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
		if (bssidx == 0)
			brcmf_cfg80211_detach(drvr->config);
	} else {
		kfree(ifp);
	}
}
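/* brcmf_attach() - called by the bus layer on probe: allocate the common
 * driver structure, link it to the bus interface and attach the protocol and
 * firmware-event layers.
 */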
int brcmf_attach(struct device *dev)
{
	struct brcmf_pub *drvr = NULL;
	int ret = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate primary brcmf_info */
	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
	if (!drvr)
		return -ENOMEM;

	mutex_init(&drvr->proto_block);

	/* Link to bus module */
	drvr->hdrlen = 0;
	drvr->bus_if = dev_get_drvdata(dev);
	drvr->bus_if->drvr = drvr;

	/* create device debugfs folder */
	brcmf_debugfs_attach(drvr);

	/* Attach and link in the protocol */
	ret = brcmf_proto_attach(drvr);
	if (ret != 0) {
		brcmf_err("brcmf_prot_attach failed\n");
		goto fail;
	}

	/* attach firmware event handler */
	brcmf_fweh_attach(drvr);

	return ret;

fail:
	brcmf_detach(dev);

	return ret;
}
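/* brcmf_bus_start() - bring up the bus, create the primary (and optional p2p)
 * interface, run the pre-initialization dongle commands, and attach firmware
 * signalling, cfg80211 and the net_device.
 */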
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* Bring up the bus */
	ret = brcmf_bus_init(bus_if);
	if (ret != 0) {
		brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
		return ret;
	}

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	bus_if->state = BRCMF_BUS_DATA;

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		if (drvr->config)
			brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}

void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr) {
		drvr->hdrlen += len;
	}
}

static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (drvr) {
		/* Stop the bus module */
		brcmf_bus_stop(drvr->bus_if);
	}
}

void brcmf_dev_reset(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr == NULL)
		return;

	if (drvr->iflist[0])
		brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}
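/* brcmf_detach() - undo brcmf_attach()/brcmf_bus_start(): remove all
 * interfaces (primary last), stop the bus and detach the protocol,
 * firmware-signalling and debugfs state.
 */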
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
		if (drvr->iflist[i]) {
			brcmf_fws_del_interface(drvr->iflist[i]);
			brcmf_del_if(drvr, i);
		}

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_fws_deinit(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}

s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_if *ifp = bus_if->drvr->iflist[0];

	return brcmf_fil_iovar_data_set(ifp, name, data, len);
}

static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}

int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	int err;

	err = wait_event_timeout(ifp->pend_8021x_wait,
				 !brcmf_get_pend_8021x_cnt(ifp),
				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

	WARN_ON(!err);

	return !err;
}

/*
 * return chip id and rev of the device encoded in u32.
 */
u32 brcmf_get_chip_info(struct brcmf_if *ifp)
{
	struct brcmf_bus *bus = ifp->drvr->bus_if;

	return bus->chip << 4 | bus->chiprev;
}

static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}

static void __exit brcmfmac_module_exit(void)
{
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
	brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);