x86: lapic address print out like io apic addr
[deliverable/linux.git] / net / mac80211 / mesh.c
CommitLineData
2e3c8736
LCC
1/*
2 * Copyright (c) 2008 open80211s Ltd.
3 * Authors: Luis Carlos Cobo <luisca@cozybit.com>
4 * Javier Cardona <javier@cozybit.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
51ceddad 11#include <asm/unaligned.h>
2e3c8736
LCC
12#include "ieee80211_i.h"
13#include "mesh.h"
14
472dbc45
JB
15#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
16#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
17
24736701
JL
18#define PP_OFFSET 1 /* Path Selection Protocol */
19#define PM_OFFSET 5 /* Path Selection Metric */
20#define CC_OFFSET 9 /* Congestion Control Mode */
21#define CAPAB_OFFSET 17
2e3c8736
LCC
22#define ACCEPT_PLINKS 0x80
23
24int mesh_allocated;
25static struct kmem_cache *rm_cache;
26
/*
 * ieee80211s_init - allocate global mesh resources
 *
 * Initializes the shared mesh path table and creates the slab cache used
 * for recent-multicast-cache (RMC) entries.  Called when the first mesh
 * interface is created (see ieee80211_mesh_init_sdata).
 *
 * NOTE(review): the kmem_cache_create() result is not checked; a NULL
 * rm_cache would make later kmem_cache_alloc() calls fail — confirm
 * whether callers can tolerate that or whether this should return an
 * error.
 */
void ieee80211s_init(void)
{
	mesh_pathtbl_init();
	mesh_allocated = 1;
	rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry),
				     0, 0, NULL);
}
34
/* Tear down the global mesh resources allocated by ieee80211s_init(). */
void ieee80211s_stop(void)
{
	mesh_pathtbl_unregister();
	kmem_cache_destroy(rm_cache);
}
40
472dbc45
JB
/*
 * Housekeeping timer callback: flag that housekeeping is due and kick the
 * mesh work item.  The actual maintenance runs in process context via the
 * work queue, since timer callbacks execute in atomic context.
 */
static void ieee80211_mesh_housekeeping_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata = (void *) data;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	ifmsh->housekeeping = true;
	queue_work(local->hw.workqueue, &ifmsh->work);
}
50
2e3c8736
LCC
51/**
52 * mesh_matches_local - check if the config of a mesh point matches ours
53 *
54 * @ie: information elements of a management frame from the mesh peer
f698d856 55 * @sdata: local mesh subif
2e3c8736
LCC
56 *
57 * This function checks if the mesh configuration of a mesh point matches the
58 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
59 */
f698d856 60bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
2e3c8736 61{
472dbc45 62 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2e3c8736 63
2e3c8736
LCC
64 /*
65 * As support for each feature is added, check for matching
66 * - On mesh config capabilities
67 * - Power Save Support En
68 * - Sync support enabled
69 * - Sync support active
70 * - Sync support required from peer
71 * - MDA enabled
72 * - Power management control on fc
73 */
472dbc45
JB
74 if (ifmsh->mesh_id_len == ie->mesh_id_len &&
75 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
76 memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
77 memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
78 memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
2e3c8736
LCC
79 return true;
80
81 return false;
82}
83
84/**
85 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
86 *
87 * @ie: information elements of a management frame from the mesh peer
2e3c8736 88 */
f698d856 89bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
2e3c8736
LCC
90{
91 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
92}
93
/**
 * mesh_accept_plinks_update: update accepting_plink in local mesh beacons
 *
 * @sdata: mesh interface in which mesh beacons are going to be updated
 */
void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
{
	bool free_plinks;

	/* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
	 * the mesh interface might be able to establish plinks with peers that
	 * are already on the table but are not on PLINK_ESTAB state. However,
	 * in general the mesh interface is not accepting peer link requests
	 * from new peers, and that must be reflected in the beacon
	 */
	free_plinks = mesh_plink_availables(sdata);

	/* only trigger a beacon refresh (via the housekeeping path) when
	 * the advertised "accepting plinks" state actually changed */
	if (free_plinks != sdata->u.mesh.accepting_plinks)
		ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
}
114
472dbc45 115void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
2e3c8736
LCC
116{
117 u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff};
118
119 memcpy(sta->mesh_pp_id, def_id, 4);
120 memcpy(sta->mesh_pm_id, def_id, 4);
121 memcpy(sta->mesh_cc_id, def_id, 4);
122}
123
f698d856 124int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
2e3c8736 125{
2e3c8736
LCC
126 int i;
127
472dbc45
JB
128 sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
129 if (!sdata->u.mesh.rmc)
2e3c8736 130 return -ENOMEM;
472dbc45 131 sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
2e3c8736 132 for (i = 0; i < RMC_BUCKETS; i++)
472dbc45 133 INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
2e3c8736
LCC
134 return 0;
135}
136
f698d856 137void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
2e3c8736 138{
472dbc45 139 struct mesh_rmc *rmc = sdata->u.mesh.rmc;
2e3c8736
LCC
140 struct rmc_entry *p, *n;
141 int i;
142
472dbc45 143 if (!sdata->u.mesh.rmc)
2e3c8736
LCC
144 return;
145
146 for (i = 0; i < RMC_BUCKETS; i++)
147 list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) {
148 list_del(&p->list);
149 kmem_cache_free(rm_cache, p);
150 }
151
152 kfree(rmc);
472dbc45 153 sdata->u.mesh.rmc = NULL;
2e3c8736
LCC
154}
155
/**
 * mesh_rmc_check - Check frame in recent multicast cache and add if absent.
 *
 * @sa: source address of the frame
 * @mesh_hdr: mesh_header, carrying the mesh sequence number
 * @sdata: interface whose recent multicast cache (RMC) is consulted
 *
 * Returns: 0 if the frame is not in the cache, nonzero (-1) otherwise.
 *
 * Checks using the source address and the mesh sequence number if we have
 * received this frame lately. If the frame is not in the cache, it is added to
 * it.  While scanning a bucket, expired entries and entries beyond
 * RMC_QUEUE_MAX_LEN are evicted.
 */
int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
		   struct ieee80211_sub_if_data *sdata)
{
	struct mesh_rmc *rmc = sdata->u.mesh.rmc;
	u32 seqnum = 0;
	int entries = 0;
	u8 idx;
	struct rmc_entry *p, *n;

	/* Don't care about endianness since only match matters */
	memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
	/* the bucket index uses a fixed byte order so it is stable */
	idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
	list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
		++entries;
		/* evict stale entries and cap the per-bucket chain length */
		if (time_after(jiffies, p->exp_time) ||
		    (entries == RMC_QUEUE_MAX_LEN)) {
			list_del(&p->list);
			kmem_cache_free(rm_cache, p);
			--entries;
		} else if ((seqnum == p->seqnum)
				&& (memcmp(sa, p->sa, ETH_ALEN) == 0))
			/* duplicate: this (sa, seqnum) pair was seen recently */
			return -1;
	}

	p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
	if (!p) {
		/* best effort: report "not seen" if we cannot remember it */
		printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
		return 0;
	}
	p->seqnum = seqnum;
	p->exp_time = jiffies + RMC_TIMEOUT;
	memcpy(p->sa, sa, ETH_ALEN);
	list_add(&p->list, &rmc->bucket[idx].list);
	return 0;
}
203
/**
 * mesh_mgmt_ies_add - append mesh information elements to a management frame
 *
 * @skb: frame under construction (e.g. a beacon or probe response)
 * @sdata: local mesh interface whose configuration is advertised
 *
 * Appends, in order: the Supported Rates IE, an Extended Supported Rates
 * IE when more than eight bitrates exist, the Mesh ID IE, and the Mesh
 * Configuration IE.
 */
void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband;
	u8 *pos;
	int len, i, rate;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
	/* the Supported Rates IE holds at most eight rates */
	len = sband->n_bitrates;
	if (len > 8)
		len = 8;
	pos = skb_put(skb, len + 2);
	*pos++ = WLAN_EID_SUPP_RATES;
	*pos++ = len;
	for (i = 0; i < len; i++) {
		rate = sband->bitrates[i].bitrate;
		/* rate / 5 converts the bitrate table value to the IE's
		 * encoding units — presumably 100 kbps -> 500 kbps units;
		 * confirm against the bitrate table definition */
		*pos++ = (u8) (rate / 5);
	}

	/* rates that did not fit go into the Extended Supported Rates IE */
	if (sband->n_bitrates > len) {
		pos = skb_put(skb, sband->n_bitrates - len + 2);
		*pos++ = WLAN_EID_EXT_SUPP_RATES;
		*pos++ = sband->n_bitrates - len;
		for (i = len; i < sband->n_bitrates; i++) {
			rate = sband->bitrates[i].bitrate;
			*pos++ = (u8) (rate / 5);
		}
	}

	pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
	*pos++ = WLAN_EID_MESH_ID;
	*pos++ = sdata->u.mesh.mesh_id_len;
	if (sdata->u.mesh.mesh_id_len)
		memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);

	/* Mesh Configuration IE: 21 bytes reserved here = 2-byte IE header
	 * plus the body; assumes MESH_CFG_LEN == 19 (defined elsewhere) */
	pos = skb_put(skb, 21);
	*pos++ = WLAN_EID_MESH_CONFIG;
	*pos++ = MESH_CFG_LEN;
	/* Version */
	*pos++ = 1;

	/* Active path selection protocol ID */
	memcpy(pos, sdata->u.mesh.mesh_pp_id, 4);
	pos += 4;

	/* Active path selection metric ID */
	memcpy(pos, sdata->u.mesh.mesh_pm_id, 4);
	pos += 4;

	/* Congestion control mode identifier */
	memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
	pos += 4;

	/* Channel precedence:
	 * Not running simple channel unification protocol
	 */
	memset(pos, 0x00, 4);
	pos += 4;

	/* Mesh capability */
	sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
	*pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00;
	*pos++ = 0x00;

	return;
}
270
f698d856 271u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
2e3c8736
LCC
272{
273 /* Use last four bytes of hw addr and interface index as hash index */
f698d856 274 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
2e3c8736
LCC
275 & tbl->hash_mask;
276}
277
/*
 * Fold a mesh ID into a one-byte hash.  The empty ID maps to 1; a
 * one-byte ID maps to its own value; longer IDs combine the first two
 * bytes (arithmetic wraps modulo 256 via the u8 cast).
 */
u8 mesh_id_hash(u8 *mesh_id, int mesh_id_len)
{
	switch (mesh_id_len) {
	case 0:
		return 1;
	case 1:
		return (u8) mesh_id[0];
	default:
		return (u8) (mesh_id[0] + 2 * mesh_id[1]);
	}
}
287
288struct mesh_table *mesh_table_alloc(int size_order)
289{
290 int i;
291 struct mesh_table *newtbl;
292
293 newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
294 if (!newtbl)
295 return NULL;
296
297 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
298 (1 << size_order), GFP_KERNEL);
299
300 if (!newtbl->hash_buckets) {
301 kfree(newtbl);
302 return NULL;
303 }
304
305 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
306 (1 << size_order), GFP_KERNEL);
307 if (!newtbl->hashwlock) {
308 kfree(newtbl->hash_buckets);
309 kfree(newtbl);
310 return NULL;
311 }
312
313 newtbl->size_order = size_order;
314 newtbl->hash_mask = (1 << size_order) - 1;
315 atomic_set(&newtbl->entries, 0);
316 get_random_bytes(&newtbl->hash_rnd,
317 sizeof(newtbl->hash_rnd));
318 for (i = 0; i <= newtbl->hash_mask; i++)
319 spin_lock_init(&newtbl->hashwlock[i]);
320
321 return newtbl;
322}
323
bd9b448f
PE
/* Free the table's bucket array, lock array, and the table itself.
 * Does not free any nodes; callers must have emptied the buckets first. */
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
330
2e3c8736
LCC
/**
 * mesh_table_free - free every node of a mesh table, then the table itself
 *
 * @tbl: table to free
 * @free_leafs: forwarded to the table's free_node callback, which decides
 *	whether the payload attached to each node is released as well
 */
void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		/* hold the per-bucket write lock while emptying it */
		spin_lock(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}
348
349static void ieee80211_mesh_path_timer(unsigned long data)
350{
351 struct ieee80211_sub_if_data *sdata =
352 (struct ieee80211_sub_if_data *) data;
472dbc45 353 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
133b8226 354 struct ieee80211_local *local = sdata->local;
2e3c8736 355
472dbc45 356 queue_work(local->hw.workqueue, &ifmsh->work);
2e3c8736
LCC
357}
358
/**
 * mesh_table_grow - grow a mesh table to double its bucket count
 *
 * @tbl: table to grow
 *
 * Returns a newly allocated table with all nodes copied over when the
 * entry count exceeds mean_chain_len per bucket, or NULL when no growth
 * is needed or any allocation/copy fails.  The old table is NOT freed
 * here: on success the caller owns both tables.
 */
struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
{
	struct mesh_table *newtbl;
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	/* only grow once the average chain length is exceeded */
	if (atomic_read(&tbl->entries)
			< tbl->mean_chain_len * (tbl->hash_mask + 1))
		goto endgrow;

	newtbl = mesh_table_alloc(tbl->size_order + 1);
	if (!newtbl)
		goto endgrow;

	newtbl->free_node = tbl->free_node;
	newtbl->mean_chain_len = tbl->mean_chain_len;
	newtbl->copy_node = tbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&tbl->entries));

	oldhash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (tbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return newtbl;

errcopy:
	/* unwind a partial copy: free the copied nodes but not their
	 * payloads (free_leafs == 0), which still belong to the old table */
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			tbl->free_node(p, 0);
	}
	__mesh_table_free(newtbl);
endgrow:
	return NULL;
}
902acc78
JB
396
/**
 * ieee80211_new_mesh_header - create a new mesh header
 * @meshhdr:    uninitialized mesh header
 * @sdata:      mesh interface to be used
 *
 * Return the header length.
 */
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
		struct ieee80211_sub_if_data *sdata)
{
	meshhdr->flags = 0;
	meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
	/* unaligned store: the mesh header need not be 4-byte aligned
	 * within the frame */
	put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
	sdata->u.mesh.mesh_seqnum++;

	/* fixed header written above: 1 flags + 1 TTL + 4 seqnum = 6 bytes */
	return 6;
}
414
472dbc45
JB
/*
 * Periodic mesh maintenance, run from the mesh work item: expire
 * inactive peers and stale mesh paths, refresh the beacon when the
 * "accepting peer links" state changed, and rearm the housekeeping timer.
 */
static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
					struct ieee80211_if_mesh *ifmsh)
{
	bool free_plinks;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: running mesh housekeeping\n",
	       sdata->dev->name);
#endif

	ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
	mesh_path_expire(sdata);

	/* rebuild the beacon only when the advertised state changed */
	free_plinks = mesh_plink_availables(sdata);
	if (free_plinks != sdata->u.mesh.accepting_plinks)
		ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);

	ifmsh->housekeeping = false;
	mod_timer(&ifmsh->housekeeping_timer,
		  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
}
436
437
/* Bring up a mesh interface: request an initial housekeeping run via the
 * mesh work item and configure the beacon. */
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;

	ifmsh->housekeeping = true;
	queue_work(local->hw.workqueue, &ifmsh->work);
	ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
}
447
/* Tear down a running mesh interface; the ordering below is significant. */
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
{
	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
	/*
	 * If the timer fired while we waited for it, it will have
	 * requeued the work. Now the work will be running again
	 * but will not rearm the timer again because it checks
	 * whether the interface is running, which, at this point,
	 * it no longer is.
	 */
	cancel_work_sync(&sdata->u.mesh.work);

	/*
	 * When we get here, the interface is marked down.
	 * Call synchronize_rcu() to wait for the RX path
	 * should it be using the interface and enqueuing
	 * frames at this very time on another CPU.
	 */
	synchronize_rcu();
	skb_queue_purge(&sdata->u.mesh.skb_queue);
}
469
/*
 * Handle a received beacon or probe response: when the sender is a mesh
 * point whose configuration matches ours, update the neighbour state for
 * it (passing along whether the peer accepts new peer links).
 */
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
					u16 stype,
					struct ieee80211_mgmt *mgmt,
					size_t len,
					struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_local *local= sdata->local;
	struct ieee802_11_elems elems;
	struct ieee80211_channel *channel;
	u64 supp_rates = 0;
	size_t baselen;
	int freq;
	enum ieee80211_band band = rx_status->band;

	/* ignore ProbeResp to foreign address */
	if (stype == IEEE80211_STYPE_PROBE_RESP &&
	    compare_ether_addr(mgmt->da, sdata->dev->dev_addr))
		return;

	/* sanity check: the fixed frame part must fit within len */
	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
	if (baselen > len)
		return;

	ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
			       &elems);

	/* prefer the channel advertised in the DS params IE over the
	 * channel the frame was actually received on */
	if (elems.ds_params && elems.ds_params_len == 1)
		freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
	else
		freq = rx_status->freq;

	channel = ieee80211_get_channel(local->hw.wiphy, freq);

	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
		return;

	if (elems.mesh_id && elems.mesh_config &&
	    mesh_matches_local(&elems, sdata)) {
		supp_rates = ieee80211_sta_get_rates(local, &elems, band);

		mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
				      mesh_peer_accepts_plinks(&elems));
	}
}
514
515static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
516 struct ieee80211_mgmt *mgmt,
517 size_t len,
518 struct ieee80211_rx_status *rx_status)
519{
520 switch (mgmt->u.action.category) {
521 case PLINK_CATEGORY:
522 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
523 break;
524 case MESH_PATH_SEL_CATEGORY:
525 mesh_rx_path_sel_frame(sdata, mgmt, len);
526 break;
527 }
528}
529
/*
 * Process one management frame previously queued by
 * ieee80211_mesh_rx_mgmt(): dispatch it to the beacon/probe-response or
 * action frame handler, then free the skb.
 */
static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
					  struct sk_buff *skb)
{
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_if_mesh *ifmsh;
	struct ieee80211_mgmt *mgmt;
	u16 stype;

	ifmsh = &sdata->u.mesh;

	/* the RX status was stashed in skb->cb when the frame was queued */
	rx_status = (struct ieee80211_rx_status *) skb->cb;
	mgmt = (struct ieee80211_mgmt *) skb->data;
	stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;

	switch (stype) {
	case IEEE80211_STYPE_PROBE_RESP:
	case IEEE80211_STYPE_BEACON:
		ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
					    rx_status);
		break;
	case IEEE80211_STYPE_ACTION:
		ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
		break;
	}

	kfree_skb(skb);
}
557
/*
 * Mesh work item: drains queued management frames, starts a path
 * discovery when PREQs are pending and the rate limit allows, and runs
 * housekeeping when flagged.  Does nothing while the interface is down;
 * during a scan it bails out and relies on
 * ieee80211_mesh_notify_scan_completed() to requeue the work.
 */
static void ieee80211_mesh_work(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata =
		container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct sk_buff *skb;

	if (!netif_running(sdata->dev))
		return;

	if (local->sw_scanning || local->hw_scanning)
		return;

	while ((skb = skb_dequeue(&ifmsh->skb_queue)))
		ieee80211_mesh_rx_queued_mgmt(sdata, skb);

	/* rate-limit PREQ emission to dot11MeshHWMPpreqMinInterval ms */
	if (ifmsh->preq_queue_len &&
	    time_after(jiffies,
		       ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
		mesh_path_start_discovery(sdata);

	if (ifmsh->housekeeping)
		ieee80211_mesh_housekeeping(sdata, ifmsh);
}
583
584void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
585{
586 struct ieee80211_sub_if_data *sdata;
587
588 rcu_read_lock();
589 list_for_each_entry_rcu(sdata, &local->interfaces, list)
590 if (ieee80211_vif_is_mesh(&sdata->vif))
591 queue_work(local->hw.workqueue, &sdata->u.mesh.work);
592 rcu_read_unlock();
593}
594
902acc78
JB
/*
 * ieee80211_mesh_init_sdata - initialize the mesh state of an interface
 *
 * Sets up the mesh work item, the housekeeping and path timers, the
 * management frame queue, the default dot11Mesh* configuration and the
 * default path selection / metric / congestion control IDs.  Allocates
 * the global mesh structures when this is the first mesh interface.
 *
 * NOTE(review): the mesh_rmc_init() return value (-ENOMEM on failure)
 * is ignored here — confirm the RX path tolerates a NULL rmc.
 */
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
	setup_timer(&ifmsh->housekeeping_timer,
		    ieee80211_mesh_housekeeping_timer,
		    (unsigned long) sdata);
	skb_queue_head_init(&sdata->u.mesh.skb_queue);

	/* default mesh configuration parameters */
	ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
	ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
	ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
	ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
	ifmsh->mshcfg.dot11MeshTTL = MESH_TTL;
	ifmsh->mshcfg.auto_open_plinks = true;
	ifmsh->mshcfg.dot11MeshMaxPeerLinks =
		MESH_MAX_ESTAB_PLINKS;
	ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout =
		MESH_PATH_TIMEOUT;
	ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval =
		MESH_PREQ_MIN_INT;
	ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
		MESH_DIAM_TRAVERSAL_TIME;
	ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries =
		MESH_MAX_PREQ_RETRIES;
	ifmsh->mshcfg.path_refresh_time =
		MESH_PATH_REFRESH_TIME;
	ifmsh->mshcfg.min_discovery_timeout =
		MESH_MIN_DISCOVERY_TIMEOUT;
	ifmsh->accepting_plinks = true;
	ifmsh->preq_id = 0;
	ifmsh->dsn = 0;
	atomic_set(&ifmsh->mpaths, 0);
	mesh_rmc_init(sdata);
	ifmsh->last_preq = jiffies;
	/* Allocate all mesh structures when creating the first mesh interface. */
	if (!mesh_allocated)
		ieee80211s_init();
	mesh_ids_set_default(ifmsh);
	setup_timer(&ifmsh->mesh_path_timer,
		    ieee80211_mesh_path_timer,
		    (unsigned long) sdata);
	INIT_LIST_HEAD(&ifmsh->preq_queue.list);
	spin_lock_init(&ifmsh->mesh_preq_queue_lock);
}
641
/*
 * ieee80211_mesh_rx_mgmt - management frame entry point for mesh interfaces
 *
 * Beacons, probe responses and action frames are queued (with their RX
 * status stashed in skb->cb) for later processing by the mesh work item
 * and RX_QUEUED is returned.  Frames shorter than the minimal 24-byte
 * 802.11 header are dropped to the monitor path; anything else continues
 * through normal RX processing.
 */
ieee80211_rx_result
ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
		       struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u16 fc;

	if (skb->len < 24)
		return RX_DROP_MONITOR;

	mgmt = (struct ieee80211_mgmt *) skb->data;
	fc = le16_to_cpu(mgmt->frame_control);

	switch (fc & IEEE80211_FCTL_STYPE) {
	case IEEE80211_STYPE_PROBE_RESP:
	case IEEE80211_STYPE_BEACON:
	case IEEE80211_STYPE_ACTION:
		memcpy(skb->cb, rx_status, sizeof(*rx_status));
		skb_queue_tail(&ifmsh->skb_queue, skb);
		queue_work(local->hw.workqueue, &ifmsh->work);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
}
This page took 0.102249 seconds and 5 git commands to generate.