brcmfmac: remove redundant ioctl handlers
[deliverable/linux.git] / drivers / net / wireless / brcm80211 / brcmfmac / dhd_linux.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/kernel.h>
18 #include <linux/etherdevice.h>
19 #include <linux/module.h>
20 #include <net/cfg80211.h>
21 #include <net/rtnetlink.h>
22 #include <brcmu_utils.h>
23 #include <brcmu_wifi.h>
24
25 #include "dhd.h"
26 #include "dhd_bus.h"
27 #include "dhd_dbg.h"
28 #include "fwil_types.h"
29 #include "p2p.h"
30 #include "wl_cfg80211.h"
31 #include "fwil.h"
32 #include "fwsignal.h"
33
34 MODULE_AUTHOR("Broadcom Corporation");
35 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
36 MODULE_LICENSE("Dual BSD/GPL");
37
#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions: byte offsets of the fields inside the
 * firmware-supplied reorder metadata blob (see
 * brcmf_rxreorder_process_info() for the consumer)
 */
#define BRCMF_RXREORDER_FLOWID_OFFSET		0
#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
#define BRCMF_RXREORDER_FLAGS_OFFSET		4
#define BRCMF_RXREORDER_CURIDX_OFFSET		6
#define BRCMF_RXREORDER_EXPIDX_OFFSET		8

/* flag bits found at BRCMF_RXREORDER_FLAGS_OFFSET */
#define BRCMF_RXREORDER_DEL_FLOW		0x01
#define BRCMF_RXREORDER_FLUSH_ALL		0x02
#define BRCMF_RXREORDER_CURIDX_VALID		0x04
#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
#define BRCMF_RXREORDER_NEW_HOLE		0x10
52
/* Error bits */
int brcmf_msg_level;	/* debug output bitmask, runtime-writable module param */
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;	/* when set, a p2p%d net_device is created */
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
64
65 char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
66 {
67 if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
68 brcmf_err("ifidx %d out of range\n", ifidx);
69 return "<if_bad>";
70 }
71
72 if (drvr->iflist[ifidx] == NULL) {
73 brcmf_err("null i/f %d\n", ifidx);
74 return "<if_null>";
75 }
76
77 if (drvr->iflist[ifidx]->ndev)
78 return drvr->iflist[ifidx]->ndev->name;
79
80 return "<if_none>";
81 }
82
/* Worker for ndo_set_rx_mode: program the firmware multicast filter list,
 * the "allmulti" mode and the promiscuous mode from the net_device flags.
 * Runs in process context so the (potentially sleeping) firmware calls
 * are safe here.
 */
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	/* firmware buffer layout: __le32 count followed by cnt MAC addresses */
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	/* NOTE(review): GFP_ATOMIC looks unnecessary in this work-queue
	 * (process) context - confirm before relaxing to GFP_KERNEL.
	 */
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	/* cnt bounds the copy so buf is never overrun even if the list
	 * grew since netdev_mc_count() was sampled
	 */
	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		/* fall back to receiving all multicast frames */
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/*Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}
147
148 static void
149 _brcmf_set_mac_address(struct work_struct *work)
150 {
151 struct brcmf_if *ifp;
152 s32 err;
153
154 ifp = container_of(work, struct brcmf_if, setmacaddr_work);
155
156 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
157
158 err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
159 ETH_ALEN);
160 if (err < 0) {
161 brcmf_err("Setting cur_etheraddr failed, %d\n", err);
162 } else {
163 brcmf_dbg(TRACE, "MAC address updated to %pM\n",
164 ifp->mac_addr);
165 memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
166 }
167 }
168
169 static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
170 {
171 struct brcmf_if *ifp = netdev_priv(ndev);
172 struct sockaddr *sa = (struct sockaddr *)addr;
173
174 memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
175 schedule_work(&ifp->setmacaddr_work);
176 return 0;
177 }
178
/* ndo_set_rx_mode: may be called in atomic context, so defer the
 * (sleeping) firmware programming of multicast/allmulti/promisc to
 * the multicast worker.
 */
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}
185
186 static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
187 struct net_device *ndev)
188 {
189 int ret;
190 struct brcmf_if *ifp = netdev_priv(ndev);
191 struct brcmf_pub *drvr = ifp->drvr;
192 struct ethhdr *eh;
193
194 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
195
196 /* Can the device send data? */
197 if (drvr->bus_if->state != BRCMF_BUS_DATA) {
198 brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
199 netif_stop_queue(ndev);
200 dev_kfree_skb(skb);
201 ret = -ENODEV;
202 goto done;
203 }
204
205 if (!drvr->iflist[ifp->bssidx]) {
206 brcmf_err("bad ifidx %d\n", ifp->bssidx);
207 netif_stop_queue(ndev);
208 dev_kfree_skb(skb);
209 ret = -ENODEV;
210 goto done;
211 }
212
213 /* Make sure there's enough room for any header */
214 if (skb_headroom(skb) < drvr->hdrlen) {
215 struct sk_buff *skb2;
216
217 brcmf_dbg(INFO, "%s: insufficient headroom\n",
218 brcmf_ifname(drvr, ifp->bssidx));
219 drvr->bus_if->tx_realloc++;
220 skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
221 dev_kfree_skb(skb);
222 skb = skb2;
223 if (skb == NULL) {
224 brcmf_err("%s: skb_realloc_headroom failed\n",
225 brcmf_ifname(drvr, ifp->bssidx));
226 ret = -ENOMEM;
227 goto done;
228 }
229 }
230
231 /* validate length for ether packet */
232 if (skb->len < sizeof(*eh)) {
233 ret = -EINVAL;
234 dev_kfree_skb(skb);
235 goto done;
236 }
237
238 ret = brcmf_fws_process_skb(ifp, skb);
239
240 done:
241 if (ret) {
242 ifp->stats.tx_dropped++;
243 } else {
244 ifp->stats.tx_packets++;
245 ifp->stats.tx_bytes += skb->len;
246 }
247
248 /* Return ok: we always eat the packet */
249 return NETDEV_TX_OK;
250 }
251
/* Set or clear one flow-control stop reason for an interface. The netif
 * queue is stopped on the first reason becoming set and woken only when
 * the last reason clears. Callable from any context: the reason bitmask
 * is updated under a spinlock with interrupts saved.
 */
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		/* stop queue only on the 0 -> nonzero transition */
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		/* wake the queue once no stop reasons remain */
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
275
276 void brcmf_txflowblock(struct device *dev, bool state)
277 {
278 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
279 struct brcmf_pub *drvr = bus_if->drvr;
280
281 brcmf_dbg(TRACE, "Enter\n");
282
283 brcmf_fws_bus_blocked(drvr, state);
284 }
285
/* Deliver a received frame to the network stack. Firmware event packets
 * are processed first (before the IFF_UP check, so events still flow while
 * the interface is down); frames arriving on a down interface are dropped.
 */
static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}
315
/* Collect buffered frames for ring slots [start, end) into skb_list (which
 * is (re)initialized here) and subtract them from the flow's pending count.
 * Indices wrap at rfi->max_idx. Note the do-while: when start == end on
 * entry the whole ring is walked, i.e. every pending frame is flushed.
 */
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}
339
/* Handle the AMPDU rx reorder metadata firmware attached to a frame (see
 * the BRCMF_RXREORDER_* offsets/flags). Out-of-order frames are parked in
 * a per-flow slot ring (rfi->pktslots) until the sequence gap closes; all
 * frames that become deliverable are flushed in order to brcmf_netif_rx().
 */
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		/* exp_idx..exp_idx walks the whole ring: flush everything */
		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		/* one slot pointer per sequence index, allocated together
		 * with the bookkeeping struct
		 */
		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi+1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		/* firmware signals a new sequence hole; flush anything still
		 * pending from the previous window first
		 */
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			/* NOTE(review): this shadows the outer end_idx */
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			/* the new frame either closes the window or starts
			 * the next hole
			 */
			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicity window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}
510
/* Bus receive entry point: strip the protocol header, resolve the target
 * interface and either run the frame through AMPDU rx reordering (when the
 * protocol layer left reorder metadata in skb->cb) or deliver it directly.
 * Invalid frames are dropped, counting rx_errors except for -ENODATA.
 */
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_skb_reorder_data *rd;
	u8 ifidx;
	int ret;

	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

	/* process and remove protocol-specific header */
	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
	ifp = drvr->iflist[ifidx];

	if (ret || !ifp || !ifp->ndev) {
		if ((ret != -ENODATA) && ifp)
			ifp->stats.rx_errors++;
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder)
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	else
		brcmf_netif_rx(ifp, skb);
}
539
/* Finish transmission of an skb: strip the protocol header, credit the
 * pending-802.1x counter for EAPOL frames (waking any waiter in
 * brcmf_netdev_wait_pend8021x()), account tx errors and free the skb.
 */
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
		      bool success)
{
	struct brcmf_if *ifp;
	struct ethhdr *eh;
	u8 ifidx;
	u16 type;
	int res;

	res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);

	ifp = drvr->iflist[ifidx];
	if (!ifp)
		goto done;

	if (res == 0) {
		eh = (struct ethhdr *)(txp->data);
		type = ntohs(eh->h_proto);

		if (type == ETH_P_PAE) {
			atomic_dec(&ifp->pend_8021x_cnt);
			/* NOTE(review): waitqueue_active() without a memory
			 * barrier may miss a waiter; the bounded wait
			 * timeout limits the impact - confirm.
			 */
			if (waitqueue_active(&ifp->pend_8021x_wait))
				wake_up(&ifp->pend_8021x_wait);
		}
	}
	if (!success)
		ifp->stats.tx_errors++;
done:
	brcmu_pkt_buf_free_skb(txp);
}
570
571 void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
572 {
573 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
574 struct brcmf_pub *drvr = bus_if->drvr;
575
576 /* await txstatus signal for firmware if active */
577 if (brcmf_fws_fc_active(drvr->fws)) {
578 if (!success)
579 brcmf_fws_bustxfail(drvr->fws, txp);
580 } else {
581 brcmf_txfinalize(drvr, txp, success);
582 }
583 }
584
585 static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
586 {
587 struct brcmf_if *ifp = netdev_priv(ndev);
588
589 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
590
591 return &ifp->stats;
592 }
593
594 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
595 struct ethtool_drvinfo *info)
596 {
597 struct brcmf_if *ifp = netdev_priv(ndev);
598 struct brcmf_pub *drvr = ifp->drvr;
599
600 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
601 snprintf(info->version, sizeof(info->version), "%lu",
602 drvr->drv_version);
603 strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
604 sizeof(info->bus_info));
605 }
606
/* Only drvinfo is implemented; other ethtool queries use kernel defaults. */
static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
610
/* ndo_stop: bring the cfg80211 layer down, then halt OS transmissions. */
static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}
624
625 static int brcmf_netdev_open(struct net_device *ndev)
626 {
627 struct brcmf_if *ifp = netdev_priv(ndev);
628 struct brcmf_pub *drvr = ifp->drvr;
629 struct brcmf_bus *bus_if = drvr->bus_if;
630 u32 toe_ol;
631
632 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
633
634 /* If bus is not ready, can't continue */
635 if (bus_if->state != BRCMF_BUS_DATA) {
636 brcmf_err("failed bus is not ready\n");
637 return -EAGAIN;
638 }
639
640 atomic_set(&ifp->pend_8021x_cnt, 0);
641
642 /* Get current TOE mode from dongle */
643 if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
644 && (toe_ol & TOE_TX_CSUM_OL) != 0)
645 ndev->features |= NETIF_F_IP_CSUM;
646 else
647 ndev->features &= ~NETIF_F_IP_CSUM;
648
649 if (brcmf_cfg80211_up(ndev)) {
650 brcmf_err("failed to bring up cfg80211\n");
651 return -EIO;
652 }
653
654 /* Allow transmit calls */
655 netif_start_queue(ndev);
656 return 0;
657 }
658
/* net_device callbacks for primary (and virtual AP/STA) interfaces. */
static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
667
/* Finish setup of an interface's net_device (ops, header space, MAC,
 * deferred-work handlers) and register it with the kernel. Returns 0 on
 * success; on failure the netdev is freed and the iflist slot cleared, so
 * the caller must not touch ifp->ndev afterwards.
 */
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	/* reserve room for the bus/protocol headers prepended on tx */
	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
		     drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	/* caller may already hold the RTNL (firmware IF event path) */
	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	/* from now on unregister_netdev() frees the netdev for us */
	ndev->destructor = free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}
713
714 static int brcmf_net_p2p_open(struct net_device *ndev)
715 {
716 brcmf_dbg(TRACE, "Enter\n");
717
718 return brcmf_cfg80211_up(ndev);
719 }
720
721 static int brcmf_net_p2p_stop(struct net_device *ndev)
722 {
723 brcmf_dbg(TRACE, "Enter\n");
724
725 return brcmf_cfg80211_down(ndev);
726 }
727
728 static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
729 struct net_device *ndev)
730 {
731 if (skb)
732 dev_kfree_skb_any(skb);
733
734 return NETDEV_TX_OK;
735 }
736
/* Minimal net_device callbacks for the p2p management interface. */
static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
742
/* Register the net_device for the p2p management interface. Like
 * brcmf_net_attach() but with the minimal p2p ops and no deferred work.
 * On failure the netdev is freed and the iflist slot cleared.
 */
static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	ndev->netdev_ops = &brcmf_netdev_ops_p2p;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	if (register_netdev(ndev) != 0) {
		brcmf_err("couldn't register the p2p net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	return 0;

fail:
	ifp->drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}
771
/* Allocate and pre-initialize a brcmf_if for the given bss/interface index.
 * For bssidx 1 with p2p disabled a bare (non-netdev) structure is created
 * for the P2P_DEVICE; otherwise a full net_device is allocated (registered
 * later via brcmf_net_attach()). Returns the new ifp or an ERR_PTR().
 */
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			/* never tear down the primary interface here */
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}
832
/* Tear down the interface at bssidx: stop traffic, cancel deferred work,
 * unregister the netdev (freed via its destructor) and, for the primary
 * interface, detach cfg80211. Non-netdev (P2P_DEVICE) interfaces are
 * simply freed.
 */
void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				/* stop the primary interface under RTNL,
				 * as the netdev callbacks expect
				 */
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
		if (bssidx == 0)
			brcmf_cfg80211_detach(drvr->config);
	} else {
		kfree(ifp);
	}
}
867
868 int brcmf_attach(struct device *dev)
869 {
870 struct brcmf_pub *drvr = NULL;
871 int ret = 0;
872
873 brcmf_dbg(TRACE, "Enter\n");
874
875 /* Allocate primary brcmf_info */
876 drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
877 if (!drvr)
878 return -ENOMEM;
879
880 mutex_init(&drvr->proto_block);
881
882 /* Link to bus module */
883 drvr->hdrlen = 0;
884 drvr->bus_if = dev_get_drvdata(dev);
885 drvr->bus_if->drvr = drvr;
886
887 /* create device debugfs folder */
888 brcmf_debugfs_attach(drvr);
889
890 /* Attach and link in the protocol */
891 ret = brcmf_proto_attach(drvr);
892 if (ret != 0) {
893 brcmf_err("brcmf_prot_attach failed\n");
894 goto fail;
895 }
896
897 /* attach firmware event handler */
898 brcmf_fweh_attach(drvr);
899
900 return ret;
901
902 fail:
903 brcmf_detach(dev);
904
905 return ret;
906 }
907
/* Bring the bus up, create the primary (and optional p2p) interfaces, run
 * firmware pre-init, attach fwsignal and cfg80211 and finally register the
 * primary netdev. On any failure everything created so far is unwound.
 */
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* Bring up the bus */
	ret = brcmf_bus_init(bus_if);
	if (ret != 0) {
		brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
		return ret;
	}

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	/* failure to create the p2p interface is not fatal */
	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	bus_if->state = BRCMF_BUS_DATA;

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		if (drvr->config)
			brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		/* netdevs were never registered; free them directly */
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}
987
988 void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
989 {
990 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
991 struct brcmf_pub *drvr = bus_if->drvr;
992
993 if (drvr) {
994 drvr->hdrlen += len;
995 }
996 }
997
998 static void brcmf_bus_detach(struct brcmf_pub *drvr)
999 {
1000 brcmf_dbg(TRACE, "Enter\n");
1001
1002 if (drvr) {
1003 /* Stop the bus module */
1004 brcmf_bus_stop(drvr->bus_if);
1005 }
1006 }
1007
1008 void brcmf_dev_reset(struct device *dev)
1009 {
1010 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1011 struct brcmf_pub *drvr = bus_if->drvr;
1012
1013 if (drvr == NULL)
1014 return;
1015
1016 if (drvr->iflist[0])
1017 brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
1018 }
1019
/* Inverse of brcmf_attach()/brcmf_bus_start(): stop event handling, remove
 * all interfaces (primary last), stop the bus and release the protocol,
 * fwsignal and debugfs resources.
 */
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
		if (drvr->iflist[i]) {
			brcmf_fws_del_interface(drvr->iflist[i]);
			brcmf_del_if(drvr, i);
		}

	brcmf_bus_detach(drvr);

	if (drvr->prot)
		brcmf_proto_detach(drvr);

	brcmf_fws_deinit(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}
1052
1053 s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
1054 {
1055 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1056 struct brcmf_if *ifp = bus_if->drvr->iflist[0];
1057
1058 return brcmf_fil_iovar_data_set(ifp, name, data, len);
1059 }
1060
/* Number of EAPOL (802.1x) frames still in flight for this interface. */
static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}
1065
1066 int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
1067 {
1068 struct brcmf_if *ifp = netdev_priv(ndev);
1069 int err;
1070
1071 err = wait_event_timeout(ifp->pend_8021x_wait,
1072 !brcmf_get_pend_8021x_cnt(ifp),
1073 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
1074
1075 WARN_ON(!err);
1076
1077 return !err;
1078 }
1079
1080 /*
1081 * return chip id and rev of the device encoded in u32.
1082 */
1083 u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1084 {
1085 struct brcmf_bus *bus = ifp->drvr->bus_if;
1086
1087 return bus->chip << 4 | bus->chiprev;
1088 }
1089
/* Deferred (work-queue) registration of the configured bus drivers, so
 * module init does not block on device probing.
 */
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1100
/* Module init: set up debugfs and defer bus-driver registration to a work
 * item (see brcmf_driver_register()).
 */
static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}
1112
/* Module exit: make sure the deferred registration has finished (or is
 * cancelled) before unregistering the bus drivers and tearing down debugfs.
 */
static void __exit brcmfmac_module_exit(void)
{
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
	brcmf_debugfs_exit();
}
1125
/* module entry points */
module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);
This page took 0.08492 seconds and 5 git commands to generate.