brcmfmac: add AMPDU reordering functionality
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/kernel.h>
18 #include <linux/etherdevice.h>
19 #include <linux/module.h>
20 #include <net/cfg80211.h>
21 #include <net/rtnetlink.h>
22 #include <brcmu_utils.h>
23 #include <brcmu_wifi.h>
24
25 #include "dhd.h"
26 #include "dhd_bus.h"
27 #include "dhd_proto.h"
28 #include "dhd_dbg.h"
29 #include "fwil_types.h"
30 #include "p2p.h"
31 #include "wl_cfg80211.h"
32 #include "fwil.h"
33 #include "fwsignal.h"
34
35 MODULE_AUTHOR("Broadcom Corporation");
36 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
37 MODULE_LICENSE("Dual BSD/GPL");
38
39 #define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
40
41 /* AMPDU rx reordering definitions */
42 #define BRCMF_RXREORDER_FLOWID_OFFSET 0
43 #define BRCMF_RXREORDER_MAXIDX_OFFSET 2
44 #define BRCMF_RXREORDER_FLAGS_OFFSET 4
45 #define BRCMF_RXREORDER_CURIDX_OFFSET 6
46 #define BRCMF_RXREORDER_EXPIDX_OFFSET 8
47
48 #define BRCMF_RXREORDER_DEL_FLOW 0x01
49 #define BRCMF_RXREORDER_FLUSH_ALL 0x02
50 #define BRCMF_RXREORDER_CURIDX_VALID 0x04
51 #define BRCMF_RXREORDER_EXPIDX_VALID 0x08
52 #define BRCMF_RXREORDER_NEW_HOLE 0x10
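/*
 * Note: reorder metadata is a small per-packet byte array; each field
 * is read as a single byte at the offsets defined above, and the flags
 * byte carries the BRCMF_RXREORDER_* flag bits. The array is presumably
 * filled in by the protocol/firmware-signal layer and reaches this file
 * via skb->cb (see struct brcmf_skb_reorder_data in brcmf_rx_frames()).
 */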
53
54 /* Debug message level */
55 int brcmf_msg_level;
56 module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
57 MODULE_PARM_DESC(debug, "level of debug output");
58
59 /* P2P0 enable */
60 static int brcmf_p2p_enable;
61 #ifdef CONFIG_BRCMDBG
62 module_param_named(p2pon, brcmf_p2p_enable, int, 0);
63 MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
64 #endif
65
66 char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
67 {
68 if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
69 brcmf_err("ifidx %d out of range\n", ifidx);
70 return "<if_bad>";
71 }
72
73 if (drvr->iflist[ifidx] == NULL) {
74 brcmf_err("null i/f %d\n", ifidx);
75 return "<if_null>";
76 }
77
78 if (drvr->iflist[ifidx]->ndev)
79 return drvr->iflist[ifidx]->ndev->name;
80
81 return "<if_none>";
82 }
83
84 static void _brcmf_set_multicast_list(struct work_struct *work)
85 {
86 struct brcmf_if *ifp;
87 struct net_device *ndev;
88 struct netdev_hw_addr *ha;
89 u32 cmd_value, cnt;
90 __le32 cnt_le;
91 char *buf, *bufp;
92 u32 buflen;
93 s32 err;
94
95 ifp = container_of(work, struct brcmf_if, multicast_work);
96
97 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
98
99 ndev = ifp->ndev;
100
101 /* Determine initial value of allmulti flag */
102 cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
103
104 /* Send down the multicast list first. */
105 cnt = netdev_mc_count(ndev);
106 buflen = sizeof(cnt) + (cnt * ETH_ALEN);
107 buf = kmalloc(buflen, GFP_ATOMIC);
108 if (!buf)
109 return;
110 bufp = buf;
111
112 cnt_le = cpu_to_le32(cnt);
113 memcpy(bufp, &cnt_le, sizeof(cnt_le));
114 bufp += sizeof(cnt_le);
115
116 netdev_for_each_mc_addr(ha, ndev) {
117 if (!cnt)
118 break;
119 memcpy(bufp, ha->addr, ETH_ALEN);
120 bufp += ETH_ALEN;
121 cnt--;
122 }
123
124 err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
125 if (err < 0) {
126 brcmf_err("Setting mcast_list failed, %d\n", err);
127 cmd_value = cnt ? true : cmd_value;
128 }
129
130 kfree(buf);
131
132 /*
133 * Now send the allmulti setting. This is based on the setting in the
134 * net_device flags, but might be modified above to be turned on if we
135 * were trying to set some addresses and dongle rejected it...
136 */
137 err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
138 if (err < 0)
139 brcmf_err("Setting allmulti failed, %d\n", err);
140
141 /* Finally, pick up the PROMISC flag */
142 cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
143 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
144 if (err < 0)
145 brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
146 err);
147 }
148
149 static void
150 _brcmf_set_mac_address(struct work_struct *work)
151 {
152 struct brcmf_if *ifp;
153 s32 err;
154
155 ifp = container_of(work, struct brcmf_if, setmacaddr_work);
156
157 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
158
159 err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
160 ETH_ALEN);
161 if (err < 0) {
162 brcmf_err("Setting cur_etheraddr failed, %d\n", err);
163 } else {
164 brcmf_dbg(TRACE, "MAC address updated to %pM\n",
165 ifp->mac_addr);
166 memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
167 }
168 }
169
170 static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
171 {
172 struct brcmf_if *ifp = netdev_priv(ndev);
173 struct sockaddr *sa = (struct sockaddr *)addr;
174
175 memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
176 schedule_work(&ifp->setmacaddr_work);
177 return 0;
178 }
179
180 static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
181 {
182 struct brcmf_if *ifp = netdev_priv(ndev);
183
184 schedule_work(&ifp->multicast_work);
185 }
186
187 static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
188 struct net_device *ndev)
189 {
190 int ret;
191 struct brcmf_if *ifp = netdev_priv(ndev);
192 struct brcmf_pub *drvr = ifp->drvr;
193 struct ethhdr *eh;
194
195 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
196
197 /* Can the device send data? */
198 if (drvr->bus_if->state != BRCMF_BUS_DATA) {
199 brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
200 netif_stop_queue(ndev);
201 dev_kfree_skb(skb);
202 ret = -ENODEV;
203 goto done;
204 }
205
206 if (!drvr->iflist[ifp->bssidx]) {
207 brcmf_err("bad ifidx %d\n", ifp->bssidx);
208 netif_stop_queue(ndev);
209 dev_kfree_skb(skb);
210 ret = -ENODEV;
211 goto done;
212 }
213
214 /* Make sure there's enough room for any header */
215 if (skb_headroom(skb) < drvr->hdrlen) {
216 struct sk_buff *skb2;
217
218 brcmf_dbg(INFO, "%s: insufficient headroom\n",
219 brcmf_ifname(drvr, ifp->bssidx));
220 drvr->bus_if->tx_realloc++;
221 skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
222 dev_kfree_skb(skb);
223 skb = skb2;
224 if (skb == NULL) {
225 brcmf_err("%s: skb_realloc_headroom failed\n",
226 brcmf_ifname(drvr, ifp->bssidx));
227 ret = -ENOMEM;
228 goto done;
229 }
230 }
231
232 /* validate length for ether packet */
233 if (skb->len < sizeof(*eh)) {
234 ret = -EINVAL;
235 dev_kfree_skb(skb);
236 goto done;
237 }
238
239 ret = brcmf_fws_process_skb(ifp, skb);
240
241 done:
242 if (ret) {
243 ifp->stats.tx_dropped++;
244 } else {
245 ifp->stats.tx_packets++;
246 ifp->stats.tx_bytes += skb->len;
247 }
248
249 /* Return ok: we always eat the packet */
250 return NETDEV_TX_OK;
251 }
252
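/*
 * ifp->netif_stop is a bitmask of BRCMF_NETIF_STOP_REASON_* flags. The
 * netdev queue is stopped when the first reason bit is set and only
 * woken again once every reason has been cleared.
 */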
253 void brcmf_txflowblock_if(struct brcmf_if *ifp,
254 enum brcmf_netif_stop_reason reason, bool state)
255 {
256 unsigned long flags;
257
258 if (!ifp || !ifp->ndev)
259 return;
260
261 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
262 ifp->bssidx, ifp->netif_stop, reason, state);
263
264 spin_lock_irqsave(&ifp->netif_stop_lock, flags);
265 if (state) {
266 if (!ifp->netif_stop)
267 netif_stop_queue(ifp->ndev);
268 ifp->netif_stop |= reason;
269 } else {
270 ifp->netif_stop &= ~reason;
271 if (!ifp->netif_stop)
272 netif_wake_queue(ifp->ndev);
273 }
274 spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
275 }
276
277 void brcmf_txflowblock(struct device *dev, bool state)
278 {
279 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
280 struct brcmf_pub *drvr = bus_if->drvr;
281 int i;
282
283 brcmf_dbg(TRACE, "Enter\n");
284
285 if (brcmf_fws_fc_active(drvr->fws)) {
286 brcmf_fws_bus_blocked(drvr, state);
287 } else {
288 for (i = 0; i < BRCMF_MAX_IFS; i++)
289 brcmf_txflowblock_if(drvr->iflist[i],
290 BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
291 state);
292 }
293 }
294
295 static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
296 {
297 skb->dev = ifp->ndev;
298 skb->protocol = eth_type_trans(skb, skb->dev);
299
300 if (skb->pkt_type == PACKET_MULTICAST)
301 ifp->stats.multicast++;
302
303 /* Process special event packets */
304 brcmf_fweh_process_skb(ifp->drvr, skb);
305
306 if (!(ifp->ndev->flags & IFF_UP)) {
307 brcmu_pkt_buf_free_skb(skb);
308 return;
309 }
310
311 ifp->stats.rx_bytes += skb->len;
312 ifp->stats.rx_packets++;
313
314 brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
315 if (in_interrupt())
316 netif_rx(skb);
317 else
318 /* If the receive is not processed inside an ISR,
319 * the ksoftirqd thread must be woken explicitly to service
320 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
321 */
322 netif_rx_ni(skb);
323 }
324
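/*
 * Move the buffered packets in slots from start up to (but not
 * including) end of the circular reorder buffer onto skb_list, clearing
 * the slots and decrementing pend_pkts accordingly; the walk wraps at
 * max_idx. Calling this with start == end flushes the entire ring,
 * which is how pending packets are drained on flow delete or when a
 * new hole is opened.
 */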
325 static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
326 u8 start, u8 end,
327 struct sk_buff_head *skb_list)
328 {
329 /* initialize return list */
330 __skb_queue_head_init(skb_list);
331
332 if (rfi->pend_pkts == 0) {
333 brcmf_dbg(INFO, "no packets in reorder queue\n");
334 return;
335 }
336
337 do {
338 if (rfi->pktslots[start]) {
339 __skb_queue_tail(skb_list, rfi->pktslots[start]);
340 rfi->pktslots[start] = NULL;
341 }
342 start++;
343 if (start > rfi->max_idx)
344 start = 0;
345 } while (start != end);
346 rfi->pend_pkts -= skb_queue_len(skb_list);
347 }
348
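/*
 * Process a received packet together with its AMPDU reorder metadata:
 * tear down the flow on DEL_FLOW (flushing anything still buffered),
 * start a fresh hole on NEW_HOLE, buffer or release packets while
 * CURIDX is valid, and otherwise treat the update as an explicit move
 * of the expected index. Whatever becomes deliverable is collected on
 * reorder_list and handed to brcmf_netif_rx() at the netif_rx label.
 */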
349 static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
350 struct sk_buff *pkt)
351 {
352 u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
353 struct brcmf_ampdu_rx_reorder *rfi;
354 struct sk_buff_head reorder_list;
355 struct sk_buff *pnext;
356 u8 flags;
357 u32 buf_size;
358
359 flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
360 flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
361
362 /* validate flags and flow id */
363 if (flags == 0xFF) {
364 brcmf_err("invalid flags...so ignore this packet\n");
365 brcmf_netif_rx(ifp, pkt);
366 return;
367 }
368
369 rfi = ifp->drvr->reorder_flows[flow_id];
370 if (flags & BRCMF_RXREORDER_DEL_FLOW) {
371 brcmf_dbg(INFO, "flow-%d: delete\n",
372 flow_id);
373
374 if (rfi == NULL) {
375 brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
376 flow_id);
377 brcmf_netif_rx(ifp, pkt);
378 return;
379 }
380
381 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
382 &reorder_list);
383 /* add the last packet */
384 __skb_queue_tail(&reorder_list, pkt);
385 kfree(rfi);
386 ifp->drvr->reorder_flows[flow_id] = NULL;
387 goto netif_rx;
388 }
389 /* from here on we need a flow reorder instance */
390 if (rfi == NULL) {
391 buf_size = sizeof(*rfi);
392 max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
393
394 buf_size += (max_idx + 1) * sizeof(pkt);
395
396 /* allocate space for flow reorder info */
397 brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
398 flow_id, max_idx);
399 rfi = kzalloc(buf_size, GFP_ATOMIC);
400 if (rfi == NULL) {
401 brcmf_err("failed to alloc buffer\n");
402 brcmf_netif_rx(ifp, pkt);
403 return;
404 }
405
406 ifp->drvr->reorder_flows[flow_id] = rfi;
407 rfi->pktslots = (struct sk_buff **)(rfi+1);
408 rfi->max_idx = max_idx;
409 }
410 if (flags & BRCMF_RXREORDER_NEW_HOLE) {
411 if (rfi->pend_pkts) {
412 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
413 rfi->exp_idx,
414 &reorder_list);
415 WARN_ON(rfi->pend_pkts);
416 } else {
417 __skb_queue_head_init(&reorder_list);
418 }
419 rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
420 rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
421 rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
422 rfi->pktslots[rfi->cur_idx] = pkt;
423 rfi->pend_pkts++;
424 brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
425 flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
426 } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
427 cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
428 exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
429
430 if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
431 /* still in the current hole */
432 /* enqueue the current on the buffer chain */
433 if (rfi->pktslots[cur_idx] != NULL) {
434 brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
435 brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
436 rfi->pktslots[cur_idx] = NULL;
437 }
438 rfi->pktslots[cur_idx] = pkt;
439 rfi->pend_pkts++;
440 rfi->cur_idx = cur_idx;
441 brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
442 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
443
444 /* can return now as there is no reorder
445 * list to process.
446 */
447 return;
448 }
449 if (rfi->exp_idx == cur_idx) {
450 if (rfi->pktslots[cur_idx] != NULL) {
451 brcmf_dbg(INFO, "error buffer pending..free it\n");
452 brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
453 rfi->pktslots[cur_idx] = NULL;
454 }
455 rfi->pktslots[cur_idx] = pkt;
456 rfi->pend_pkts++;
457
458 /* got the expected one. flush from current to expected
459 * and update expected
460 */
461 brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
462 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
463
464 rfi->cur_idx = cur_idx;
465 rfi->exp_idx = exp_idx;
466
467 brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
468 &reorder_list);
469 brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
470 flow_id, skb_queue_len(&reorder_list),
471 rfi->pend_pkts);
472 } else {
475 brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
476 flow_id, flags, rfi->cur_idx, rfi->exp_idx,
477 cur_idx, exp_idx);
478 if (flags & BRCMF_RXREORDER_FLUSH_ALL)
479 end_idx = rfi->exp_idx;
480 else
481 end_idx = exp_idx;
482
483 /* flush pkts first */
484 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
485 &reorder_list);
486
487 if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
488 __skb_queue_tail(&reorder_list, pkt);
489 } else {
490 rfi->pktslots[cur_idx] = pkt;
491 rfi->pend_pkts++;
492 }
493 rfi->exp_idx = exp_idx;
494 rfi->cur_idx = cur_idx;
495 }
496 } else {
497 /* explicit window move updating the expected index */
498 exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
499
500 brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
501 flow_id, flags, rfi->exp_idx, exp_idx);
502 if (flags & BRCMF_RXREORDER_FLUSH_ALL)
503 end_idx = rfi->exp_idx;
504 else
505 end_idx = exp_idx;
506
507 brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
508 &reorder_list);
509 __skb_queue_tail(&reorder_list, pkt);
510 /* set the new expected idx */
511 rfi->exp_idx = exp_idx;
512 }
513 netif_rx:
514 skb_queue_walk_safe(&reorder_list, pkt, pnext) {
515 __skb_unlink(pkt, &reorder_list);
516 brcmf_netif_rx(ifp, pkt);
517 }
518 }
519
520 void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
521 {
522 struct sk_buff *skb, *pnext;
523 struct brcmf_if *ifp;
524 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
525 struct brcmf_pub *drvr = bus_if->drvr;
526 struct brcmf_skb_reorder_data *rd;
527 u8 ifidx;
528 int ret;
529
530 brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
531 skb_queue_len(skb_list));
532
533 skb_queue_walk_safe(skb_list, skb, pnext) {
534 skb_unlink(skb, skb_list);
535
536 /* process and remove protocol-specific header */
537 ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
538 ifp = drvr->iflist[ifidx];
539
540 if (ret || !ifp || !ifp->ndev) {
541 if ((ret != -ENODATA) && ifp)
542 ifp->stats.rx_errors++;
543 brcmu_pkt_buf_free_skb(skb);
544 continue;
545 }
546
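/* per-packet reorder metadata, if any, was stored in the skb
 * control buffer (presumably by the firmware-signal handling in
 * the hdrpull step above); a NULL rd->reorder means the packet
 * can go straight up the stack
 */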
547 rd = (struct brcmf_skb_reorder_data *)skb->cb;
548 if (rd->reorder)
549 brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
550 else
551 brcmf_netif_rx(ifp, skb);
552 }
553 }
554
555 void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
556 bool success)
557 {
558 struct brcmf_if *ifp;
559 struct ethhdr *eh;
560 u8 ifidx;
561 u16 type;
562 int res;
563
564 res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
565
566 ifp = drvr->iflist[ifidx];
567 if (!ifp)
568 goto done;
569
570 if (res == 0) {
571 eh = (struct ethhdr *)(txp->data);
572 type = ntohs(eh->h_proto);
573
574 if (type == ETH_P_PAE) {
575 atomic_dec(&ifp->pend_8021x_cnt);
576 if (waitqueue_active(&ifp->pend_8021x_wait))
577 wake_up(&ifp->pend_8021x_wait);
578 }
579 }
580 if (!success)
581 ifp->stats.tx_errors++;
582 done:
583 brcmu_pkt_buf_free_skb(txp);
584 }
585
586 void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
587 {
588 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
589 struct brcmf_pub *drvr = bus_if->drvr;
590
591 /* await txstatus signal for firmware if active */
592 if (brcmf_fws_fc_active(drvr->fws)) {
593 if (!success)
594 brcmf_fws_bustxfail(drvr->fws, txp);
595 } else {
596 brcmf_txfinalize(drvr, txp, success);
597 }
598 }
599
600 static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
601 {
602 struct brcmf_if *ifp = netdev_priv(ndev);
603
604 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
605
606 return &ifp->stats;
607 }
608
609 /*
610 * Set current toe component enables in toe_ol iovar,
611 * and set toe global enable iovar
612 */
613 static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
614 {
615 s32 err;
616
617 err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
618 if (err < 0) {
619 brcmf_err("Setting toe_ol failed, %d\n", err);
620 return err;
621 }
622
623 err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
624 if (err < 0)
625 brcmf_err("Setting toe failed, %d\n", err);
626
627 return err;
629 }
630
631 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
632 struct ethtool_drvinfo *info)
633 {
634 struct brcmf_if *ifp = netdev_priv(ndev);
635 struct brcmf_pub *drvr = ifp->drvr;
636
637 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
638 snprintf(info->version, sizeof(info->version), "%lu",
639 drvr->drv_version);
640 strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
641 sizeof(info->bus_info));
642 }
643
644 static const struct ethtool_ops brcmf_ethtool_ops = {
645 .get_drvinfo = brcmf_ethtool_get_drvinfo,
646 };
647
648 static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
649 {
650 struct brcmf_pub *drvr = ifp->drvr;
651 struct ethtool_drvinfo info;
652 char drvname[sizeof(info.driver)];
653 u32 cmd;
654 struct ethtool_value edata;
655 u32 toe_cmpnt, csum_dir;
656 int ret;
657
658 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
659
660 /* all ethtool calls start with a cmd word */
661 if (copy_from_user(&cmd, uaddr, sizeof(u32)))
662 return -EFAULT;
663
664 switch (cmd) {
665 case ETHTOOL_GDRVINFO:
666 /* Copy out any request driver name */
667 if (copy_from_user(&info, uaddr, sizeof(info)))
668 return -EFAULT;
669 strncpy(drvname, info.driver, sizeof(info.driver));
670 drvname[sizeof(info.driver) - 1] = '\0';
671
672 /* clear struct for return */
673 memset(&info, 0, sizeof(info));
674 info.cmd = cmd;
675
676 /* if requested, identify ourselves */
677 if (strcmp(drvname, "?dhd") == 0) {
678 sprintf(info.driver, "dhd");
679 strcpy(info.version, BRCMF_VERSION_STR);
680 }
681 /* report dongle driver type */
682 else
683 sprintf(info.driver, "wl");
684
685 sprintf(info.version, "%lu", drvr->drv_version);
686 if (copy_to_user(uaddr, &info, sizeof(info)))
687 return -EFAULT;
688 brcmf_dbg(TRACE, "given %*s, returning %s\n",
689 (int)sizeof(drvname), drvname, info.driver);
690 break;
691
692 /* Get toe offload components from dongle */
693 case ETHTOOL_GRXCSUM:
694 case ETHTOOL_GTXCSUM:
695 ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
696 if (ret < 0)
697 return ret;
698
699 csum_dir =
700 (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
701
702 edata.cmd = cmd;
703 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
704
705 if (copy_to_user(uaddr, &edata, sizeof(edata)))
706 return -EFAULT;
707 break;
708
709 /* Set toe offload components in dongle */
710 case ETHTOOL_SRXCSUM:
711 case ETHTOOL_STXCSUM:
712 if (copy_from_user(&edata, uaddr, sizeof(edata)))
713 return -EFAULT;
714
715 /* Read the current settings, update and write back */
716 ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
717 if (ret < 0)
718 return ret;
719
720 csum_dir =
721 (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
722
723 if (edata.data != 0)
724 toe_cmpnt |= csum_dir;
725 else
726 toe_cmpnt &= ~csum_dir;
727
728 ret = brcmf_toe_set(ifp, toe_cmpnt);
729 if (ret < 0)
730 return ret;
731
732 /* If setting TX checksum mode, tell Linux the new mode */
733 if (cmd == ETHTOOL_STXCSUM) {
734 if (edata.data)
735 ifp->ndev->features |= NETIF_F_IP_CSUM;
736 else
737 ifp->ndev->features &= ~NETIF_F_IP_CSUM;
738 }
739
740 break;
741
742 default:
743 return -EOPNOTSUPP;
744 }
745
746 return 0;
747 }
748
749 static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
750 int cmd)
751 {
752 struct brcmf_if *ifp = netdev_priv(ndev);
753 struct brcmf_pub *drvr = ifp->drvr;
754
755 brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
756
757 if (!drvr->iflist[ifp->bssidx])
758 return -1;
759
760 if (cmd == SIOCETHTOOL)
761 return brcmf_ethtool(ifp, ifr->ifr_data);
762
763 return -EOPNOTSUPP;
764 }
765
766 static int brcmf_netdev_stop(struct net_device *ndev)
767 {
768 struct brcmf_if *ifp = netdev_priv(ndev);
769
770 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
771
772 brcmf_cfg80211_down(ndev);
773
774 /* Set state and stop OS transmissions */
775 netif_stop_queue(ndev);
776
777 return 0;
778 }
779
780 static int brcmf_netdev_open(struct net_device *ndev)
781 {
782 struct brcmf_if *ifp = netdev_priv(ndev);
783 struct brcmf_pub *drvr = ifp->drvr;
784 struct brcmf_bus *bus_if = drvr->bus_if;
785 u32 toe_ol;
786 s32 ret = 0;
787
788 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
789
790 /* If bus is not ready, can't continue */
791 if (bus_if->state != BRCMF_BUS_DATA) {
792 brcmf_err("failed bus is not ready\n");
793 return -EAGAIN;
794 }
795
796 atomic_set(&ifp->pend_8021x_cnt, 0);
797
798 /* Get current TOE mode from dongle */
799 if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
800 && (toe_ol & TOE_TX_CSUM_OL) != 0)
801 ndev->features |= NETIF_F_IP_CSUM;
802 else
803 ndev->features &= ~NETIF_F_IP_CSUM;
804
805 /* Allow transmit calls */
806 netif_start_queue(ndev);
807 if (brcmf_cfg80211_up(ndev)) {
808 brcmf_err("failed to bring up cfg80211\n");
809 return -1;
810 }
811
812 return ret;
813 }
814
815 static const struct net_device_ops brcmf_netdev_ops_pri = {
816 .ndo_open = brcmf_netdev_open,
817 .ndo_stop = brcmf_netdev_stop,
818 .ndo_get_stats = brcmf_netdev_get_stats,
819 .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
820 .ndo_start_xmit = brcmf_netdev_start_xmit,
821 .ndo_set_mac_address = brcmf_netdev_set_mac_address,
822 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
823 };
824
825 int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
826 {
827 struct brcmf_pub *drvr = ifp->drvr;
828 struct net_device *ndev;
829 s32 err;
830
831 brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
832 ifp->mac_addr);
833 ndev = ifp->ndev;
834
835 /* set appropriate operations */
836 ndev->netdev_ops = &brcmf_netdev_ops_pri;
837
838 ndev->hard_header_len += drvr->hdrlen;
839 ndev->ethtool_ops = &brcmf_ethtool_ops;
840
841 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
842 drvr->hdrlen;
843
844 /* set the mac address */
845 memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
846
847 INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
848 INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
849
850 if (rtnl_locked)
851 err = register_netdevice(ndev);
852 else
853 err = register_netdev(ndev);
854 if (err != 0) {
855 brcmf_err("couldn't register the net device\n");
856 goto fail;
857 }
858
859 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
860
861 ndev->destructor = free_netdev;
862 return 0;
863
864 fail:
865 drvr->iflist[ifp->bssidx] = NULL;
866 ndev->netdev_ops = NULL;
867 free_netdev(ndev);
868 return -EBADE;
869 }
870
871 static int brcmf_net_p2p_open(struct net_device *ndev)
872 {
873 brcmf_dbg(TRACE, "Enter\n");
874
875 return brcmf_cfg80211_up(ndev);
876 }
877
878 static int brcmf_net_p2p_stop(struct net_device *ndev)
879 {
880 brcmf_dbg(TRACE, "Enter\n");
881
882 return brcmf_cfg80211_down(ndev);
883 }
884
885 static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
886 struct ifreq *ifr, int cmd)
887 {
888 brcmf_dbg(TRACE, "Enter\n");
889 return 0;
890 }
891
892 static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
893 struct net_device *ndev)
894 {
895 if (skb)
896 dev_kfree_skb_any(skb);
897
898 return NETDEV_TX_OK;
899 }
900
901 static const struct net_device_ops brcmf_netdev_ops_p2p = {
902 .ndo_open = brcmf_net_p2p_open,
903 .ndo_stop = brcmf_net_p2p_stop,
904 .ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
905 .ndo_start_xmit = brcmf_net_p2p_start_xmit
906 };
907
908 static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
909 {
910 struct net_device *ndev;
911
912 brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
913 ifp->mac_addr);
914 ndev = ifp->ndev;
915
916 ndev->netdev_ops = &brcmf_netdev_ops_p2p;
917
918 /* set the mac address */
919 memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
920
921 if (register_netdev(ndev) != 0) {
922 brcmf_err("couldn't register the p2p net device\n");
923 goto fail;
924 }
925
926 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
927
928 return 0;
929
930 fail:
931 ifp->drvr->iflist[ifp->bssidx] = NULL;
932 ndev->netdev_ops = NULL;
933 free_netdev(ndev);
934 return -EBADE;
935 }
936
937 struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
938 char *name, u8 *mac_addr)
939 {
940 struct brcmf_if *ifp;
941 struct net_device *ndev;
942
943 brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
944
945 ifp = drvr->iflist[bssidx];
946 /*
947 * Delete the existing interface before overwriting it
948 * in case we missed the BRCMF_E_IF_DEL event.
949 */
950 if (ifp) {
951 brcmf_err("ERROR: netdev:%s already exists\n",
952 ifp->ndev->name);
953 if (ifidx) {
954 netif_stop_queue(ifp->ndev);
955 unregister_netdev(ifp->ndev);
956 free_netdev(ifp->ndev);
957 drvr->iflist[bssidx] = NULL;
958 } else {
959 brcmf_err("ignore IF event\n");
960 return ERR_PTR(-EINVAL);
961 }
962 }
963
964 if (!brcmf_p2p_enable && bssidx == 1) {
965 /* this is P2P_DEVICE interface */
966 brcmf_dbg(INFO, "allocate non-netdev interface\n");
967 ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
968 if (!ifp)
969 return ERR_PTR(-ENOMEM);
970 } else {
971 brcmf_dbg(INFO, "allocate netdev interface\n");
972 /* Allocate netdev, including space for private structure */
973 ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
974 if (!ndev)
975 return ERR_PTR(-ENOMEM);
976
977 ifp = netdev_priv(ndev);
978 ifp->ndev = ndev;
979 }
980
981 ifp->drvr = drvr;
982 drvr->iflist[bssidx] = ifp;
983 ifp->ifidx = ifidx;
984 ifp->bssidx = bssidx;
985
986 init_waitqueue_head(&ifp->pend_8021x_wait);
987 spin_lock_init(&ifp->netif_stop_lock);
988
989 if (mac_addr != NULL)
990 memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
991
992 brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
993 current->pid, name, ifp->mac_addr);
994
995 return ifp;
996 }
997
998 void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
999 {
1000 struct brcmf_if *ifp;
1001
1002 ifp = drvr->iflist[bssidx];
1003 drvr->iflist[bssidx] = NULL;
1004 if (!ifp) {
1005 brcmf_err("Null interface, idx=%d\n", bssidx);
1006 return;
1007 }
1008 brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
1009 if (ifp->ndev) {
1010 if (bssidx == 0) {
1011 if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
1012 rtnl_lock();
1013 brcmf_netdev_stop(ifp->ndev);
1014 rtnl_unlock();
1015 }
1016 } else {
1017 netif_stop_queue(ifp->ndev);
1018 }
1019
1020 if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
1021 cancel_work_sync(&ifp->setmacaddr_work);
1022 cancel_work_sync(&ifp->multicast_work);
1023 }
1024 /* unregister will take care of freeing it */
1025 unregister_netdev(ifp->ndev);
1026 if (bssidx == 0)
1027 brcmf_cfg80211_detach(drvr->config);
1028 } else {
1029 kfree(ifp);
1030 }
1031 }
1032
1033 int brcmf_attach(uint bus_hdrlen, struct device *dev)
1034 {
1035 struct brcmf_pub *drvr = NULL;
1036 int ret = 0;
1037
1038 brcmf_dbg(TRACE, "Enter\n");
1039
1040 /* Allocate primary brcmf_info */
1041 drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
1042 if (!drvr)
1043 return -ENOMEM;
1044
1045 mutex_init(&drvr->proto_block);
1046
1047 /* Link to bus module */
1048 drvr->hdrlen = bus_hdrlen;
1049 drvr->bus_if = dev_get_drvdata(dev);
1050 drvr->bus_if->drvr = drvr;
1051
1052 /* create device debugfs folder */
1053 brcmf_debugfs_attach(drvr);
1054
1055 /* Attach and link in the protocol */
1056 ret = brcmf_proto_attach(drvr);
1057 if (ret != 0) {
1058 brcmf_err("brcmf_prot_attach failed\n");
1059 goto fail;
1060 }
1061
1062 /* attach firmware event handler */
1063 brcmf_fweh_attach(drvr);
1064
1065 INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
1066
1067 return ret;
1068
1069 fail:
1070 brcmf_detach(dev);
1071
1072 return ret;
1073 }
1074
1075 int brcmf_bus_start(struct device *dev)
1076 {
1077 int ret = -1;
1078 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1079 struct brcmf_pub *drvr = bus_if->drvr;
1080 struct brcmf_if *ifp;
1081 struct brcmf_if *p2p_ifp;
1082
1083 brcmf_dbg(TRACE, "\n");
1084
1085 /* Bring up the bus */
1086 ret = brcmf_bus_init(bus_if);
1087 if (ret != 0) {
1088 brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
1089 return ret;
1090 }
1091
1092 /* add primary networking interface */
1093 ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
1094 if (IS_ERR(ifp))
1095 return PTR_ERR(ifp);
1096
1097 if (brcmf_p2p_enable)
1098 p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
1099 else
1100 p2p_ifp = NULL;
1101 if (IS_ERR(p2p_ifp))
1102 p2p_ifp = NULL;
1103
1104 /* signal bus ready */
1105 bus_if->state = BRCMF_BUS_DATA;
1106
1107 /* Bus is ready, do any initialization */
1108 ret = brcmf_c_preinit_dcmds(ifp);
1109 if (ret < 0)
1110 goto fail;
1111
1112 drvr->fw_signals = true;
1113 ret = brcmf_fws_init(drvr);
1114 if (ret < 0)
1115 goto fail;
1116
1117 brcmf_fws_add_interface(ifp);
1118
1119 drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
1120 if (drvr->config == NULL) {
1121 ret = -ENOMEM;
1122 goto fail;
1123 }
1124
1125 ret = brcmf_fweh_activate_events(ifp);
1126 if (ret < 0)
1127 goto fail;
1128
1129 ret = brcmf_net_attach(ifp, false);
1130 fail:
1131 if (ret < 0) {
1132 brcmf_err("failed: %d\n", ret);
1133 if (drvr->config)
1134 brcmf_cfg80211_detach(drvr->config);
1135 if (drvr->fws) {
1136 brcmf_fws_del_interface(ifp);
1137 brcmf_fws_deinit(drvr);
1138 }
1139 if (drvr->iflist[0]) {
1140 free_netdev(ifp->ndev);
1141 drvr->iflist[0] = NULL;
1142 }
1143 if (p2p_ifp) {
1144 free_netdev(p2p_ifp->ndev);
1145 drvr->iflist[1] = NULL;
1146 }
1147 return ret;
1148 }
1149 if ((brcmf_p2p_enable) && (p2p_ifp))
1150 if (brcmf_net_p2p_attach(p2p_ifp) < 0)
1151 brcmf_p2p_enable = 0;
1152
1153 return 0;
1154 }
1155
1156 static void brcmf_bus_detach(struct brcmf_pub *drvr)
1157 {
1158 brcmf_dbg(TRACE, "Enter\n");
1159
1160 if (drvr) {
1161 /* Stop the protocol module */
1162 brcmf_proto_stop(drvr);
1163
1164 /* Stop the bus module */
1165 brcmf_bus_stop(drvr->bus_if);
1166 }
1167 }
1168
1169 void brcmf_dev_reset(struct device *dev)
1170 {
1171 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1172 struct brcmf_pub *drvr = bus_if->drvr;
1173
1174 if (drvr == NULL)
1175 return;
1176
1177 if (drvr->iflist[0])
1178 brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
1179 }
1180
1181 void brcmf_detach(struct device *dev)
1182 {
1183 s32 i;
1184 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1185 struct brcmf_pub *drvr = bus_if->drvr;
1186
1187 brcmf_dbg(TRACE, "Enter\n");
1188
1189 if (drvr == NULL)
1190 return;
1191
1192 /* stop firmware event handling */
1193 brcmf_fweh_detach(drvr);
1194
1195 /* make sure primary interface removed last */
1196 for (i = BRCMF_MAX_IFS-1; i > -1; i--)
1197 if (drvr->iflist[i]) {
1198 brcmf_fws_del_interface(drvr->iflist[i]);
1199 brcmf_del_if(drvr, i);
1200 }
1201
1202 brcmf_bus_detach(drvr);
1203
1204 if (drvr->prot)
1205 brcmf_proto_detach(drvr);
1206
1207 brcmf_fws_deinit(drvr);
1208
1209 brcmf_debugfs_detach(drvr);
1210 bus_if->drvr = NULL;
1211 kfree(drvr);
1212 }
1213
1214 static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
1215 {
1216 return atomic_read(&ifp->pend_8021x_cnt);
1217 }
1218
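/*
 * Wait up to MAX_WAIT_FOR_8021X_TX msecs for all outstanding 802.1X
 * (EAPOL) frames to complete transmission; returns 0 when the count
 * drained in time and non-zero (with a WARN) on timeout.
 */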
1219 int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
1220 {
1221 struct brcmf_if *ifp = netdev_priv(ndev);
1222 int err;
1223
1224 err = wait_event_timeout(ifp->pend_8021x_wait,
1225 !brcmf_get_pend_8021x_cnt(ifp),
1226 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
1227
1228 WARN_ON(!err);
1229
1230 return !err;
1231 }
1232
1233 /*
1234 * return chip id and rev of the device encoded in u32.
1235 */
1236 u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1237 {
1238 struct brcmf_bus *bus = ifp->drvr->bus_if;
1239
1240 return bus->chip << 4 | bus->chiprev;
1241 }
1242
1243 static void brcmf_driver_init(struct work_struct *work)
1244 {
1245 brcmf_debugfs_init();
1246
1247 #ifdef CONFIG_BRCMFMAC_SDIO
1248 brcmf_sdio_init();
1249 #endif
1250 #ifdef CONFIG_BRCMFMAC_USB
1251 brcmf_usb_init();
1252 #endif
1253 }
1254 static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init);
1255
1256 static int __init brcmfmac_module_init(void)
1257 {
1258 if (!schedule_work(&brcmf_driver_work))
1259 return -EBUSY;
1260
1261 return 0;
1262 }
1263
1264 static void __exit brcmfmac_module_exit(void)
1265 {
1266 cancel_work_sync(&brcmf_driver_work);
1267
1268 #ifdef CONFIG_BRCMFMAC_SDIO
1269 brcmf_sdio_exit();
1270 #endif
1271 #ifdef CONFIG_BRCMFMAC_USB
1272 brcmf_usb_exit();
1273 #endif
1274 brcmf_debugfs_exit();
1275 }
1276
1277 module_init(brcmfmac_module_init);
1278 module_exit(brcmfmac_module_exit);