drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c (deliverable/linux.git, commit 6056efd02fcde54ae7e4b86a39e04aab48e6832f)
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/kernel.h>
18 #include <linux/etherdevice.h>
19 #include <linux/module.h>
20 #include <net/cfg80211.h>
21 #include <net/rtnetlink.h>
22 #include <brcmu_utils.h>
23 #include <brcmu_wifi.h>
24
25 #include "dhd.h"
26 #include "dhd_bus.h"
27 #include "dhd_dbg.h"
28 #include "fwil_types.h"
29 #include "p2p.h"
30 #include "wl_cfg80211.h"
31 #include "fwil.h"
32 #include "fwsignal.h"
33 #include "proto.h"
34
35 MODULE_AUTHOR("Broadcom Corporation");
36 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
37 MODULE_LICENSE("Dual BSD/GPL");
38
39 #define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
40
41 /* AMPDU rx reordering definitions */
42 #define BRCMF_RXREORDER_FLOWID_OFFSET 0
43 #define BRCMF_RXREORDER_MAXIDX_OFFSET 2
44 #define BRCMF_RXREORDER_FLAGS_OFFSET 4
45 #define BRCMF_RXREORDER_CURIDX_OFFSET 6
46 #define BRCMF_RXREORDER_EXPIDX_OFFSET 8
47
48 #define BRCMF_RXREORDER_DEL_FLOW 0x01
49 #define BRCMF_RXREORDER_FLUSH_ALL 0x02
50 #define BRCMF_RXREORDER_CURIDX_VALID 0x04
51 #define BRCMF_RXREORDER_EXPIDX_VALID 0x08
52 #define BRCMF_RXREORDER_NEW_HOLE 0x10
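/*
 * The BRCMF_RXREORDER_* offsets above index into the per-packet reorder
 * metadata that brcmf_rx_frame() picks up from struct brcmf_skb_reorder_data
 * in skb->cb: byte 0 holds the flow id, byte 2 the highest slot index of the
 * flow, byte 4 the BRCMF_RXREORDER_* flag bits, and bytes 6 and 8 the current
 * and expected slot indices used by brcmf_rxreorder_process_info().
 */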
53
54 /* Debug message level */
55 int brcmf_msg_level;
56 module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
57 MODULE_PARM_DESC(debug, "level of debug output");
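/*
 * Illustrative usage (not part of the original file): the level is a bitmask
 * consumed by the brcmf_dbg() macros and can be set at load time, e.g.
 * "modprobe brcmfmac debug=<bitmask>", or changed at runtime through
 * /sys/module/brcmfmac/parameters/debug.
 */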
58
59 /* P2P0 enable */
60 static int brcmf_p2p_enable;
61 #ifdef CONFIG_BRCMDBG
62 module_param_named(p2pon, brcmf_p2p_enable, int, 0);
63 MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
64 #endif
65
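/*
 * brcmf_ifname() - return the netdev name for interface index @ifidx.
 *
 * Intended for log messages: on an out-of-range index, a missing interface
 * or a missing net_device it returns a fixed placeholder string instead of
 * failing.
 */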
66 char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
67 {
68 if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
69 brcmf_err("ifidx %d out of range\n", ifidx);
70 return "<if_bad>";
71 }
72
73 if (drvr->iflist[ifidx] == NULL) {
74 brcmf_err("null i/f %d\n", ifidx);
75 return "<if_null>";
76 }
77
78 if (drvr->iflist[ifidx]->ndev)
79 return drvr->iflist[ifidx]->ndev->name;
80
81 return "<if_none>";
82 }
83
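/*
 * _brcmf_set_multicast_list() - worker pushing rx filter settings to the
 * dongle.
 *
 * Builds the "mcast_list" iovar buffer (a __le32 address count followed by
 * the ETH_ALEN multicast addresses of the netdev), then programs the
 * "allmulti" iovar and the BRCMF_C_SET_PROMISC command from the current
 * net_device flags. Scheduled from brcmf_netdev_set_multicast_list().
 */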
84 static void _brcmf_set_multicast_list(struct work_struct *work)
85 {
86 struct brcmf_if *ifp;
87 struct net_device *ndev;
88 struct netdev_hw_addr *ha;
89 u32 cmd_value, cnt;
90 __le32 cnt_le;
91 char *buf, *bufp;
92 u32 buflen;
93 s32 err;
94
95 ifp = container_of(work, struct brcmf_if, multicast_work);
96
97 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
98
99 ndev = ifp->ndev;
100
101 /* Determine initial value of allmulti flag */
102 cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
103
104 /* Send down the multicast list first. */
105 cnt = netdev_mc_count(ndev);
106 buflen = sizeof(cnt) + (cnt * ETH_ALEN);
107 buf = kmalloc(buflen, GFP_ATOMIC);
108 if (!buf)
109 return;
110 bufp = buf;
111
112 cnt_le = cpu_to_le32(cnt);
113 memcpy(bufp, &cnt_le, sizeof(cnt_le));
114 bufp += sizeof(cnt_le);
115
116 netdev_for_each_mc_addr(ha, ndev) {
117 if (!cnt)
118 break;
119 memcpy(bufp, ha->addr, ETH_ALEN);
120 bufp += ETH_ALEN;
121 cnt--;
122 }
123
124 err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
125 if (err < 0) {
126 brcmf_err("Setting mcast_list failed, %d\n", err);
127 cmd_value = cnt ? true : cmd_value;
128 }
129
130 kfree(buf);
131
132 /*
133 * Now send the allmulti setting. This is based on the setting in the
134 * net_device flags, but may have been forced on above if the dongle
135 * rejected the multicast list.
136 */
137 err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
138 if (err < 0)
139 brcmf_err("Setting allmulti failed, %d\n", err);
140
141 /* Finally, pick up the PROMISC flag */
142 cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
143 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
144 if (err < 0)
145 brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
146 err);
147 }
148
149 static void
150 _brcmf_set_mac_address(struct work_struct *work)
151 {
152 struct brcmf_if *ifp;
153 s32 err;
154
155 ifp = container_of(work, struct brcmf_if, setmacaddr_work);
156
157 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
158
159 err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
160 ETH_ALEN);
161 if (err < 0) {
162 brcmf_err("Setting cur_etheraddr failed, %d\n", err);
163 } else {
164 brcmf_dbg(TRACE, "MAC address updated to %pM\n",
165 ifp->mac_addr);
166 memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
167 }
168 }
169
170 static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
171 {
172 struct brcmf_if *ifp = netdev_priv(ndev);
173 struct sockaddr *sa = (struct sockaddr *)addr;
174
175 memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
176 schedule_work(&ifp->setmacaddr_work);
177 return 0;
178 }
179
180 static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
181 {
182 struct brcmf_if *ifp = netdev_priv(ndev);
183
184 schedule_work(&ifp->multicast_work);
185 }
186
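/*
 * brcmf_netdev_start_xmit() - netdev transmit entry point.
 *
 * Drops the frame unless the bus is in BRCMF_BUS_DATA state, re-allocates
 * headroom when the bus/protocol headers (drvr->hdrlen) do not fit, and
 * hands the frame to the firmware-signalling module via
 * brcmf_fws_process_skb(). The skb is always consumed, so NETDEV_TX_OK is
 * returned even on failure.
 */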
187 static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
188 struct net_device *ndev)
189 {
190 int ret;
191 struct brcmf_if *ifp = netdev_priv(ndev);
192 struct brcmf_pub *drvr = ifp->drvr;
193 struct ethhdr *eh;
194
195 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
196
197 /* Can the device send data? */
198 if (drvr->bus_if->state != BRCMF_BUS_DATA) {
199 brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
200 netif_stop_queue(ndev);
201 dev_kfree_skb(skb);
202 ret = -ENODEV;
203 goto done;
204 }
205
206 if (!drvr->iflist[ifp->bssidx]) {
207 brcmf_err("bad ifidx %d\n", ifp->bssidx);
208 netif_stop_queue(ndev);
209 dev_kfree_skb(skb);
210 ret = -ENODEV;
211 goto done;
212 }
213
214 /* Make sure there's enough room for any header */
215 if (skb_headroom(skb) < drvr->hdrlen) {
216 struct sk_buff *skb2;
217
218 brcmf_dbg(INFO, "%s: insufficient headroom\n",
219 brcmf_ifname(drvr, ifp->bssidx));
220 drvr->bus_if->tx_realloc++;
221 skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
222 dev_kfree_skb(skb);
223 skb = skb2;
224 if (skb == NULL) {
225 brcmf_err("%s: skb_realloc_headroom failed\n",
226 brcmf_ifname(drvr, ifp->bssidx));
227 ret = -ENOMEM;
228 goto done;
229 }
230 }
231
232 /* validate length for ether packet */
233 if (skb->len < sizeof(*eh)) {
234 ret = -EINVAL;
235 dev_kfree_skb(skb);
236 goto done;
237 }
238
239 ret = brcmf_fws_process_skb(ifp, skb);
240
241 done:
242 if (ret) {
243 ifp->stats.tx_dropped++;
244 } else {
245 ifp->stats.tx_packets++;
246 ifp->stats.tx_bytes += skb->len;
247 }
248
249 /* Return ok: we always eat the packet */
250 return NETDEV_TX_OK;
251 }
252
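/*
 * brcmf_txflowblock_if() - stop or wake the netif queue of an interface.
 *
 * Each stop reason is tracked as a bit in ifp->netif_stop under
 * netif_stop_lock; the queue is stopped when the first reason is set and
 * woken again only when the last reason is cleared.
 */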
253 void brcmf_txflowblock_if(struct brcmf_if *ifp,
254 enum brcmf_netif_stop_reason reason, bool state)
255 {
256 unsigned long flags;
257
258 if (!ifp || !ifp->ndev)
259 return;
260
261 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
262 ifp->bssidx, ifp->netif_stop, reason, state);
263
264 spin_lock_irqsave(&ifp->netif_stop_lock, flags);
265 if (state) {
266 if (!ifp->netif_stop)
267 netif_stop_queue(ifp->ndev);
268 ifp->netif_stop |= reason;
269 } else {
270 ifp->netif_stop &= ~reason;
271 if (!ifp->netif_stop)
272 netif_wake_queue(ifp->ndev);
273 }
274 spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
275 }
276
277 void brcmf_txflowblock(struct device *dev, bool state)
278 {
279 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
280 struct brcmf_pub *drvr = bus_if->drvr;
281
282 brcmf_dbg(TRACE, "Enter\n");
283
284 brcmf_fws_bus_blocked(drvr, state);
285 }
286
287 static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
288 {
289 skb->dev = ifp->ndev;
290 skb->protocol = eth_type_trans(skb, skb->dev);
291
292 if (skb->pkt_type == PACKET_MULTICAST)
293 ifp->stats.multicast++;
294
295 /* Process special event packets */
296 brcmf_fweh_process_skb(ifp->drvr, skb);
297
298 if (!(ifp->ndev->flags & IFF_UP)) {
299 brcmu_pkt_buf_free_skb(skb);
300 return;
301 }
302
303 ifp->stats.rx_bytes += skb->len;
304 ifp->stats.rx_packets++;
305
306 brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
307 if (in_interrupt())
308 netif_rx(skb);
309 else
310 /* If the receive is not processed inside an ISR,
311 * the softirqd must be woken explicitly to service
312 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
313 */
314 netif_rx_ni(skb);
315 }
316
317 static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
318 u8 start, u8 end,
319 struct sk_buff_head *skb_list)
320 {
321 /* initialize return list */
322 __skb_queue_head_init(skb_list);
323
324 if (rfi->pend_pkts == 0) {
325 brcmf_dbg(INFO, "no packets in reorder queue\n");
326 return;
327 }
328
329 do {
330 if (rfi->pktslots[start]) {
331 __skb_queue_tail(skb_list, rfi->pktslots[start]);
332 rfi->pktslots[start] = NULL;
333 }
334 start++;
335 if (start > rfi->max_idx)
336 start = 0;
337 } while (start != end);
338 rfi->pend_pkts -= skb_queue_len(skb_list);
339 }
340
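/*
 * brcmf_rxreorder_process_info() - apply AMPDU rx reorder metadata to a
 * received frame.
 *
 * Frames of a flow are parked in rfi->pktslots[], a circular array of
 * rfi->max_idx + 1 slots. cur_idx is the slot of the most recently stored
 * frame and exp_idx the next slot expected to complete the sequence. The
 * flag bits in the metadata select the action: delete the flow, open a new
 * hole, store or flush on a valid current index, or move the expected index,
 * flushing the whole ring when BRCMF_RXREORDER_FLUSH_ALL is set. Frames that
 * become deliverable are collected on reorder_list and passed to
 * brcmf_netif_rx().
 */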
341 static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
342 struct sk_buff *pkt)
343 {
344 u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
345 struct brcmf_ampdu_rx_reorder *rfi;
346 struct sk_buff_head reorder_list;
347 struct sk_buff *pnext;
348 u8 flags;
349 u32 buf_size;
350
351 flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
352 flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
353
354 /* validate flags and flow id */
355 if (flags == 0xFF) {
356 brcmf_err("invalid flags...so ignore this packet\n");
357 brcmf_netif_rx(ifp, pkt);
358 return;
359 }
360
361 rfi = ifp->drvr->reorder_flows[flow_id];
362 if (flags & BRCMF_RXREORDER_DEL_FLOW) {
363 brcmf_dbg(INFO, "flow-%d: delete\n",
364 flow_id);
365
366 if (rfi == NULL) {
367 brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
368 flow_id);
369 brcmf_netif_rx(ifp, pkt);
370 return;
371 }
372
373 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
374 &reorder_list);
375 /* add the last packet */
376 __skb_queue_tail(&reorder_list, pkt);
377 kfree(rfi);
378 ifp->drvr->reorder_flows[flow_id] = NULL;
379 goto netif_rx;
380 }
381 /* from here on we need a flow reorder instance */
382 if (rfi == NULL) {
383 buf_size = sizeof(*rfi);
384 max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
385
386 buf_size += (max_idx + 1) * sizeof(pkt);
387
388 /* allocate space for flow reorder info */
389 brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
390 flow_id, max_idx);
391 rfi = kzalloc(buf_size, GFP_ATOMIC);
392 if (rfi == NULL) {
393 brcmf_err("failed to alloc buffer\n");
394 brcmf_netif_rx(ifp, pkt);
395 return;
396 }
397
398 ifp->drvr->reorder_flows[flow_id] = rfi;
399 rfi->pktslots = (struct sk_buff **)(rfi+1);
400 rfi->max_idx = max_idx;
401 }
402 if (flags & BRCMF_RXREORDER_NEW_HOLE) {
403 if (rfi->pend_pkts) {
404 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
405 rfi->exp_idx,
406 &reorder_list);
407 WARN_ON(rfi->pend_pkts);
408 } else {
409 __skb_queue_head_init(&reorder_list);
410 }
411 rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
412 rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
413 rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
414 rfi->pktslots[rfi->cur_idx] = pkt;
415 rfi->pend_pkts++;
416 brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
417 flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
418 } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
419 cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
420 exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
421
422 if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
423 /* still in the current hole */
424 /* enqueue the packet in its reorder slot */
425 if (rfi->pktslots[cur_idx] != NULL) {
426 brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
427 brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
428 rfi->pktslots[cur_idx] = NULL;
429 }
430 rfi->pktslots[cur_idx] = pkt;
431 rfi->pend_pkts++;
432 rfi->cur_idx = cur_idx;
433 brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
434 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
435
436 /* can return now as there is no reorder
437 * list to process.
438 */
439 return;
440 }
441 if (rfi->exp_idx == cur_idx) {
442 if (rfi->pktslots[cur_idx] != NULL) {
443 brcmf_dbg(INFO, "error buffer pending..free it\n");
444 brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
445 rfi->pktslots[cur_idx] = NULL;
446 }
447 rfi->pktslots[cur_idx] = pkt;
448 rfi->pend_pkts++;
449
450 /* got the expected one. flush from current to expected
451 * and update expected
452 */
453 brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
454 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
455
456 rfi->cur_idx = cur_idx;
457 rfi->exp_idx = exp_idx;
458
459 brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
460 &reorder_list);
461 brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
462 flow_id, skb_queue_len(&reorder_list),
463 rfi->pend_pkts);
464 } else {
465 u8 end_idx;
466
467 brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
468 flow_id, flags, rfi->cur_idx, rfi->exp_idx,
469 cur_idx, exp_idx);
470 if (flags & BRCMF_RXREORDER_FLUSH_ALL)
471 end_idx = rfi->exp_idx;
472 else
473 end_idx = exp_idx;
474
475 /* flush pkts first */
476 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
477 &reorder_list);
478
479 if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
480 __skb_queue_tail(&reorder_list, pkt);
481 } else {
482 rfi->pktslots[cur_idx] = pkt;
483 rfi->pend_pkts++;
484 }
485 rfi->exp_idx = exp_idx;
486 rfi->cur_idx = cur_idx;
487 }
488 } else {
489 /* explicit window move: update the expected index */
490 exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
491
492 brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
493 flow_id, flags, rfi->exp_idx, exp_idx);
494 if (flags & BRCMF_RXREORDER_FLUSH_ALL)
495 end_idx = rfi->exp_idx;
496 else
497 end_idx = exp_idx;
498
499 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
500 &reorder_list);
501 __skb_queue_tail(&reorder_list, pkt);
502 /* set the new expected idx */
503 rfi->exp_idx = exp_idx;
504 }
505 netif_rx:
506 skb_queue_walk_safe(&reorder_list, pkt, pnext) {
507 __skb_unlink(pkt, &reorder_list);
508 brcmf_netif_rx(ifp, pkt);
509 }
510 }
511
512 void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
513 {
514 struct brcmf_if *ifp;
515 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
516 struct brcmf_pub *drvr = bus_if->drvr;
517 struct brcmf_skb_reorder_data *rd;
518 u8 ifidx;
519 int ret;
520
521 brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
522
523 /* process and remove protocol-specific header */
524 ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
525 ifp = drvr->iflist[ifidx];
526
527 if (ret || !ifp || !ifp->ndev) {
528 if ((ret != -ENODATA) && ifp)
529 ifp->stats.rx_errors++;
530 brcmu_pkt_buf_free_skb(skb);
531 return;
532 }
533
534 rd = (struct brcmf_skb_reorder_data *)skb->cb;
535 if (rd->reorder)
536 brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
537 else
538 brcmf_netif_rx(ifp, skb);
539 }
540
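/*
 * brcmf_txfinalize() - finish accounting for a transmitted frame.
 *
 * For EAPOL (ETH_P_PAE) frames the pending 802.1X counter is decremented
 * and any waiter in brcmf_netdev_wait_pend8021x() is woken. The frame is
 * freed unconditionally and tx_errors is bumped when the transmit failed.
 */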
541 void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
542 bool success)
543 {
544 struct brcmf_if *ifp;
545 struct ethhdr *eh;
546 u16 type;
547
548 ifp = drvr->iflist[ifidx];
549 if (!ifp)
550 goto done;
551
552 eh = (struct ethhdr *)(txp->data);
553 type = ntohs(eh->h_proto);
554
555 if (type == ETH_P_PAE) {
556 atomic_dec(&ifp->pend_8021x_cnt);
557 if (waitqueue_active(&ifp->pend_8021x_wait))
558 wake_up(&ifp->pend_8021x_wait);
559 }
560
561 if (!success)
562 ifp->stats.tx_errors++;
563 done:
564 brcmu_pkt_buf_free_skb(txp);
565 }
566
567 void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
568 {
569 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
570 struct brcmf_pub *drvr = bus_if->drvr;
571 u8 ifidx;
572
573 /* if firmware-signalling is active, tx status arrives via txstatus signals */
574 if (brcmf_fws_fc_active(drvr->fws)) {
575 if (!success)
576 brcmf_fws_bustxfail(drvr->fws, txp);
577 } else {
578 if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
579 brcmu_pkt_buf_free_skb(txp);
580 else
581 brcmf_txfinalize(drvr, txp, ifidx, success);
582 }
583 }
584
585 static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
586 {
587 struct brcmf_if *ifp = netdev_priv(ndev);
588
589 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
590
591 return &ifp->stats;
592 }
593
594 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
595 struct ethtool_drvinfo *info)
596 {
597 struct brcmf_if *ifp = netdev_priv(ndev);
598 struct brcmf_pub *drvr = ifp->drvr;
599
600 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
601 snprintf(info->version, sizeof(info->version), "n/a");
602 strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
603 strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
604 sizeof(info->bus_info));
605 }
606
607 static const struct ethtool_ops brcmf_ethtool_ops = {
608 .get_drvinfo = brcmf_ethtool_get_drvinfo,
609 };
610
611 static int brcmf_netdev_stop(struct net_device *ndev)
612 {
613 struct brcmf_if *ifp = netdev_priv(ndev);
614
615 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
616
617 brcmf_cfg80211_down(ndev);
618
619 /* Set state and stop OS transmissions */
620 netif_stop_queue(ndev);
621
622 return 0;
623 }
624
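/*
 * brcmf_netdev_open() - netdev ndo_open handler.
 *
 * Refuses to open while the bus is not in BRCMF_BUS_DATA state, resets the
 * pending 802.1X counter, advertises NETIF_F_IP_CSUM only when the dongle
 * reports TX checksum offload via the "toe_ol" iovar, brings up cfg80211
 * and finally enables the transmit queue.
 */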
625 static int brcmf_netdev_open(struct net_device *ndev)
626 {
627 struct brcmf_if *ifp = netdev_priv(ndev);
628 struct brcmf_pub *drvr = ifp->drvr;
629 struct brcmf_bus *bus_if = drvr->bus_if;
630 u32 toe_ol;
631
632 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
633
634 /* If bus is not ready, can't continue */
635 if (bus_if->state != BRCMF_BUS_DATA) {
636 brcmf_err("failed bus is not ready\n");
637 return -EAGAIN;
638 }
639
640 atomic_set(&ifp->pend_8021x_cnt, 0);
641
642 /* Get current TOE mode from dongle */
643 if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
644 && (toe_ol & TOE_TX_CSUM_OL) != 0)
645 ndev->features |= NETIF_F_IP_CSUM;
646 else
647 ndev->features &= ~NETIF_F_IP_CSUM;
648
649 if (brcmf_cfg80211_up(ndev)) {
650 brcmf_err("failed to bring up cfg80211\n");
651 return -EIO;
652 }
653
654 /* Allow transmit calls */
655 netif_start_queue(ndev);
656 return 0;
657 }
658
659 static const struct net_device_ops brcmf_netdev_ops_pri = {
660 .ndo_open = brcmf_netdev_open,
661 .ndo_stop = brcmf_netdev_stop,
662 .ndo_get_stats = brcmf_netdev_get_stats,
663 .ndo_start_xmit = brcmf_netdev_start_xmit,
664 .ndo_set_mac_address = brcmf_netdev_set_mac_address,
665 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
666 };
667
668 int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
669 {
670 struct brcmf_pub *drvr = ifp->drvr;
671 struct net_device *ndev;
672 s32 err;
673
674 brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
675 ifp->mac_addr);
676 ndev = ifp->ndev;
677
678 /* set appropriate operations */
679 ndev->netdev_ops = &brcmf_netdev_ops_pri;
680
681 ndev->hard_header_len += drvr->hdrlen;
682 ndev->ethtool_ops = &brcmf_ethtool_ops;
683
684 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
685 drvr->hdrlen;
686
687 /* set the mac address */
688 memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
689
690 INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
691 INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
692
693 if (rtnl_locked)
694 err = register_netdevice(ndev);
695 else
696 err = register_netdev(ndev);
697 if (err != 0) {
698 brcmf_err("couldn't register the net device\n");
699 goto fail;
700 }
701
702 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
703
704 ndev->destructor = brcmf_cfg80211_free_netdev;
705 return 0;
706
707 fail:
708 drvr->iflist[ifp->bssidx] = NULL;
709 ndev->netdev_ops = NULL;
710 free_netdev(ndev);
711 return -EBADE;
712 }
713
714 static int brcmf_net_p2p_open(struct net_device *ndev)
715 {
716 brcmf_dbg(TRACE, "Enter\n");
717
718 return brcmf_cfg80211_up(ndev);
719 }
720
721 static int brcmf_net_p2p_stop(struct net_device *ndev)
722 {
723 brcmf_dbg(TRACE, "Enter\n");
724
725 return brcmf_cfg80211_down(ndev);
726 }
727
728 static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
729 struct net_device *ndev)
730 {
731 if (skb)
732 dev_kfree_skb_any(skb);
733
734 return NETDEV_TX_OK;
735 }
736
737 static const struct net_device_ops brcmf_netdev_ops_p2p = {
738 .ndo_open = brcmf_net_p2p_open,
739 .ndo_stop = brcmf_net_p2p_stop,
740 .ndo_start_xmit = brcmf_net_p2p_start_xmit
741 };
742
743 static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
744 {
745 struct net_device *ndev;
746
747 brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
748 ifp->mac_addr);
749 ndev = ifp->ndev;
750
751 ndev->netdev_ops = &brcmf_netdev_ops_p2p;
752
753 /* set the mac address */
754 memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
755
756 if (register_netdev(ndev) != 0) {
757 brcmf_err("couldn't register the p2p net device\n");
758 goto fail;
759 }
760
761 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
762
763 return 0;
764
765 fail:
766 ifp->drvr->iflist[ifp->bssidx] = NULL;
767 ndev->netdev_ops = NULL;
768 free_netdev(ndev);
769 return -EBADE;
770 }
771
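/*
 * brcmf_add_if() - allocate a driver interface for the given bss index.
 *
 * If an interface already occupies the bss index it is torn down first
 * (except for the primary interface, where the event is ignored). With
 * p2pon disabled, bss index 1 is treated as the P2P_DEVICE interface and
 * gets a bare brcmf_if without a net_device; all other interfaces are
 * allocated together with their net_device via alloc_netdev().
 */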
772 struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
773 char *name, u8 *mac_addr)
774 {
775 struct brcmf_if *ifp;
776 struct net_device *ndev;
777
778 brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
779
780 ifp = drvr->iflist[bssidx];
781 /*
782 * Delete the existing interface before overwriting it
783 * in case we missed the BRCMF_E_IF_DEL event.
784 */
785 if (ifp) {
786 brcmf_err("ERROR: netdev:%s already exists\n",
787 ifp->ndev->name);
788 if (ifidx) {
789 netif_stop_queue(ifp->ndev);
790 unregister_netdev(ifp->ndev);
791 free_netdev(ifp->ndev);
792 drvr->iflist[bssidx] = NULL;
793 } else {
794 brcmf_err("ignore IF event\n");
795 return ERR_PTR(-EINVAL);
796 }
797 }
798
799 if (!brcmf_p2p_enable && bssidx == 1) {
800 /* this is P2P_DEVICE interface */
801 brcmf_dbg(INFO, "allocate non-netdev interface\n");
802 ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
803 if (!ifp)
804 return ERR_PTR(-ENOMEM);
805 } else {
806 brcmf_dbg(INFO, "allocate netdev interface\n");
807 /* Allocate netdev, including space for private structure */
808 ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
809 if (!ndev)
810 return ERR_PTR(-ENOMEM);
811
812 ifp = netdev_priv(ndev);
813 ifp->ndev = ndev;
814 }
815
816 ifp->drvr = drvr;
817 drvr->iflist[bssidx] = ifp;
818 ifp->ifidx = ifidx;
819 ifp->bssidx = bssidx;
820
821 init_waitqueue_head(&ifp->pend_8021x_wait);
822 spin_lock_init(&ifp->netif_stop_lock);
823
824 if (mac_addr != NULL)
825 memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
826
827 brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
828 current->pid, name, ifp->mac_addr);
829
830 return ifp;
831 }
832
833 void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
834 {
835 struct brcmf_if *ifp;
836
837 ifp = drvr->iflist[bssidx];
838 drvr->iflist[bssidx] = NULL;
839 if (!ifp) {
840 brcmf_err("Null interface, idx=%d\n", bssidx);
841 return;
842 }
843 brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
844 if (ifp->ndev) {
845 if (bssidx == 0) {
846 if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
847 rtnl_lock();
848 brcmf_netdev_stop(ifp->ndev);
849 rtnl_unlock();
850 }
851 } else {
852 netif_stop_queue(ifp->ndev);
853 }
854
855 if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
856 cancel_work_sync(&ifp->setmacaddr_work);
857 cancel_work_sync(&ifp->multicast_work);
858 }
859 /* unregister will take care of freeing it */
860 unregister_netdev(ifp->ndev);
861 } else {
862 kfree(ifp);
863 }
864 }
865
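/*
 * brcmf_attach() - allocate and initialize the common driver state.
 *
 * Allocates struct brcmf_pub, links it to the bus instance found in the
 * device's drvdata, creates the debugfs folder and attaches the protocol
 * layer and the firmware event handler. The network interfaces themselves
 * are created later in brcmf_bus_start().
 */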
866 int brcmf_attach(struct device *dev)
867 {
868 struct brcmf_pub *drvr = NULL;
869 int ret = 0;
870
871 brcmf_dbg(TRACE, "Enter\n");
872
873 /* Allocate primary brcmf_pub */
874 drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
875 if (!drvr)
876 return -ENOMEM;
877
878 mutex_init(&drvr->proto_block);
879
880 /* Link to bus module */
881 drvr->hdrlen = 0;
882 drvr->bus_if = dev_get_drvdata(dev);
883 drvr->bus_if->drvr = drvr;
884
885 /* create device debugfs folder */
886 brcmf_debugfs_attach(drvr);
887
888 /* Attach and link in the protocol */
889 ret = brcmf_proto_attach(drvr);
890 if (ret != 0) {
891 brcmf_err("brcmf_prot_attach failed\n");
892 goto fail;
893 }
894
895 /* attach firmware event handler */
896 brcmf_fweh_attach(drvr);
897
898 return ret;
899
900 fail:
901 brcmf_detach(dev);
902
903 return ret;
904 }
905
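/*
 * brcmf_bus_start() - bring up the dongle once the bus driver is ready.
 *
 * Initializes the bus, creates the primary "wlan%d" interface (and an
 * optional "p2p%d" interface when p2pon is set), marks the bus as ready for
 * data, runs the preinit dongle commands, sets up firmware signalling and
 * cfg80211, activates firmware events and finally registers the net device.
 * On failure everything set up so far is unwound.
 */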
906 int brcmf_bus_start(struct device *dev)
907 {
908 int ret = -1;
909 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
910 struct brcmf_pub *drvr = bus_if->drvr;
911 struct brcmf_if *ifp;
912 struct brcmf_if *p2p_ifp;
913
914 brcmf_dbg(TRACE, "\n");
915
916 /* Bring up the bus */
917 ret = brcmf_bus_init(bus_if);
918 if (ret != 0) {
919 brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
920 return ret;
921 }
922
923 /* add primary networking interface */
924 ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
925 if (IS_ERR(ifp))
926 return PTR_ERR(ifp);
927
928 if (brcmf_p2p_enable)
929 p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
930 else
931 p2p_ifp = NULL;
932 if (IS_ERR(p2p_ifp))
933 p2p_ifp = NULL;
934
935 /* signal bus ready */
936 brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);
937
938 /* Bus is ready, do any initialization */
939 ret = brcmf_c_preinit_dcmds(ifp);
940 if (ret < 0)
941 goto fail;
942
943 ret = brcmf_fws_init(drvr);
944 if (ret < 0)
945 goto fail;
946
947 brcmf_fws_add_interface(ifp);
948
949 drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
950 if (drvr->config == NULL) {
951 ret = -ENOMEM;
952 goto fail;
953 }
954
955 ret = brcmf_fweh_activate_events(ifp);
956 if (ret < 0)
957 goto fail;
958
959 ret = brcmf_net_attach(ifp, false);
960 fail:
961 if (ret < 0) {
962 brcmf_err("failed: %d\n", ret);
963 brcmf_cfg80211_detach(drvr->config);
964 if (drvr->fws) {
965 brcmf_fws_del_interface(ifp);
966 brcmf_fws_deinit(drvr);
967 }
968 if (drvr->iflist[0]) {
969 free_netdev(ifp->ndev);
970 drvr->iflist[0] = NULL;
971 }
972 if (p2p_ifp) {
973 free_netdev(p2p_ifp->ndev);
974 drvr->iflist[1] = NULL;
975 }
976 return ret;
977 }
978 if ((brcmf_p2p_enable) && (p2p_ifp))
979 if (brcmf_net_p2p_attach(p2p_ifp) < 0)
980 brcmf_p2p_enable = 0;
981
982 return 0;
983 }
984
985 void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
986 {
987 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
988 struct brcmf_pub *drvr = bus_if->drvr;
989
990 if (drvr) {
991 drvr->hdrlen += len;
992 }
993 }
994
995 static void brcmf_bus_detach(struct brcmf_pub *drvr)
996 {
997 brcmf_dbg(TRACE, "Enter\n");
998
999 if (drvr) {
1000 /* Stop the bus module */
1001 brcmf_bus_stop(drvr->bus_if);
1002 }
1003 }
1004
1005 void brcmf_dev_reset(struct device *dev)
1006 {
1007 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1008 struct brcmf_pub *drvr = bus_if->drvr;
1009
1010 if (drvr == NULL)
1011 return;
1012
1013 if (drvr->iflist[0])
1014 brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
1015 }
1016
1017 void brcmf_detach(struct device *dev)
1018 {
1019 s32 i;
1020 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1021 struct brcmf_pub *drvr = bus_if->drvr;
1022
1023 brcmf_dbg(TRACE, "Enter\n");
1024
1025 if (drvr == NULL)
1026 return;
1027
1028 /* stop firmware event handling */
1029 brcmf_fweh_detach(drvr);
1030
1031 brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
1032
1033 /* make sure primary interface removed last */
1034 for (i = BRCMF_MAX_IFS-1; i > -1; i--)
1035 if (drvr->iflist[i]) {
1036 brcmf_fws_del_interface(drvr->iflist[i]);
1037 brcmf_del_if(drvr, i);
1038 }
1039
1040 brcmf_cfg80211_detach(drvr->config);
1041
1042 brcmf_fws_deinit(drvr);
1043
1044 brcmf_bus_detach(drvr);
1045
1046 brcmf_proto_detach(drvr);
1047
1048 brcmf_debugfs_detach(drvr);
1049 bus_if->drvr = NULL;
1050 kfree(drvr);
1051 }
1052
1053 s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
1054 {
1055 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1056 struct brcmf_if *ifp = bus_if->drvr->iflist[0];
1057
1058 return brcmf_fil_iovar_data_set(ifp, name, data, len);
1059 }
1060
1061 static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
1062 {
1063 return atomic_read(&ifp->pend_8021x_cnt);
1064 }
1065
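/*
 * brcmf_netdev_wait_pend8021x() - wait for outstanding 802.1X frames.
 *
 * Sleeps until all pending EAPOL transmissions have been finalized or until
 * MAX_WAIT_FOR_8021X_TX milliseconds have elapsed. Returns 0 when the
 * counter drained and non-zero (with a WARN_ON) on timeout.
 */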
1066 int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
1067 {
1068 struct brcmf_if *ifp = netdev_priv(ndev);
1069 int err;
1070
1071 err = wait_event_timeout(ifp->pend_8021x_wait,
1072 !brcmf_get_pend_8021x_cnt(ifp),
1073 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
1074
1075 WARN_ON(!err);
1076
1077 return !err;
1078 }
1079
1080 /*
1081 * return chip id and rev of the device encoded in u32.
1082 */
1083 u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1084 {
1085 struct brcmf_bus *bus = ifp->drvr->bus_if;
1086
1087 return bus->chip << 4 | bus->chiprev;
1088 }
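/*
 * Illustrative decoding note: the shift above places the chip revision in
 * the low nibble, so a caller can recover the parts as rev = info & 0xf and
 * chip = info >> 4 (assuming, as the shift implies, that chiprev fits in
 * 4 bits).
 */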
1089
1090 static void brcmf_driver_register(struct work_struct *work)
1091 {
1092 #ifdef CONFIG_BRCMFMAC_SDIO
1093 brcmf_sdio_register();
1094 #endif
1095 #ifdef CONFIG_BRCMFMAC_USB
1096 brcmf_usb_register();
1097 #endif
1098 }
1099 static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1100
1101 static int __init brcmfmac_module_init(void)
1102 {
1103 brcmf_debugfs_init();
1104 #ifdef CONFIG_BRCMFMAC_SDIO
1105 brcmf_sdio_init();
1106 #endif
1107 if (!schedule_work(&brcmf_driver_work))
1108 return -EBUSY;
1109
1110 return 0;
1111 }
1112
1113 static void __exit brcmfmac_module_exit(void)
1114 {
1115 cancel_work_sync(&brcmf_driver_work);
1116
1117 #ifdef CONFIG_BRCMFMAC_SDIO
1118 brcmf_sdio_exit();
1119 #endif
1120 #ifdef CONFIG_BRCMFMAC_USB
1121 brcmf_usb_exit();
1122 #endif
1123 brcmf_debugfs_exit();
1124 }
1125
1126 module_init(brcmfmac_module_init);
1127 module_exit(brcmfmac_module_exit);