mac80211: simplify mesh frame queue mapping and QoS

net/mac80211/mesh_pathtbl.c
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires that we hold
 * the read lock or we risk operating on an old table. The write lock is only
 * needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
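/*
 * Illustrative use (a sketch, not code from this file): the walker runs
 * under rcu_read_lock() and passes an already-dereferenced table
 * pointer, per the warning above:
 *
 *	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	for_each_mesh_entry(tbl, p, node, i)
 *		handle(node->mpath);	// hypothetical helper
 */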


static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}
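/*
 * Sizing example: INIT_PATHS_SIZE_ORDER == 2 yields 1 << 2 = 4 buckets
 * and a hash_mask of 0x3; each grow step allocates a table of
 * size_order + 1, doubling the bucket count.
 */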

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		/* known_gates may still be NULL if table setup failed
		 * before the gates list was allocated */
		if (tbl->known_gates) {
			hlist_for_each_entry_safe(gate, p, q,
					tbl->known_gates, list) {
				hlist_del(&gate->list);
				kfree(gate);
			}
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}
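/*
 * The u32 read at addr+2 covers the last four octets of the hardware
 * address, its most variable part; mixing in the interface index and
 * the per-table random seed (hash_rnd) keeps bucket placement
 * independent across tables and interfaces.
 */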


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
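/*
 * The dequeue-and-splice-back above rewrites the RA (addr1) of every
 * queued frame while frame_queue.lock is held, so a concurrent dequeue
 * can never observe a stale next hop, and queue order is preserved.
 */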

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
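/*
 * Resulting frame layout (sketch): for a frame that lacked an Address
 * Extension field, the mesh header now carries MESH_FLAGS_AE_A5_A6 with
 * eaddr1/eaddr2 preserving the original mesh DA/SA, while addr1 is the
 * gate's next hop and addr3 the gate itself (dst_addr above).
 */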

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
			gate_mpath->dst,
			skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
				     struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
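/*
 * This lookup walks the whole table, so it is linear in the number of
 * paths; it is presumably meant for occasional enumeration (e.g.
 * userspace path dumps) rather than per-frame forwarding.
 */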

static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	kfree(node);
}

/**
 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
 * @tbl: table which contains known_gates list
 * @mpath: mpath to known mesh gate
 *
 * Returns: 0 on success
 */
static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(tbl);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	tbl = rcu_dereference(tbl);

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			call_rcu(&gate->rcu, mesh_gate_node_reclaim);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_path_add_gate - add the given mpath as a path to a mesh gate in our
 * path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	return mesh_gate_add(mesh_paths, mpath);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
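/*
 * Plausible caller pattern (a sketch, not code from this file): the TX
 * path looks the path up first and creates it on a miss, leaving the
 * frame on mpath->frame_queue while path discovery runs:
 *
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (!mpath) {
 *		mesh_path_add(dst, sdata);
 *		mpath = mesh_path_lookup(dst, sdata);
 *	}
 */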

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
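/*
 * Both grow paths follow the classic RCU resize recipe: allocate a
 * table twice the size, copy every node into it under the resize write
 * lock, publish it with rcu_assign_pointer(), and free the old table
 * only after a grace period (mesh_table_free_rcu with free_leafs ==
 * false, since the mesh_path leaves are shared by both tables).
 */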

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock_bh(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock_bh(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (MPP) paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}
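/*
 * Frames released here keep the queue mapping they were given when
 * first queued; ieee80211_add_pending_skbs() hands them back to the
 * pending TX queues without re-classifying their QoS, which is the
 * simplification this change is about.
 */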

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
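/*
 * Cascade semantics of the loop above: the first active gate has the
 * failed queue moved onto it (copy is still false), and every later
 * active gate receives a copy taken from the previous gate's queue, so
 * all gates end up holding the same pending frames.
 */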

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		rcu_read_lock();
		mpath = mesh_path_lookup(da, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			sn = ++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
		}
		rcu_read_unlock();
		/* the PERR target is the unreachable destination (da),
		 * not the raw frame header */
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, da,
				cpu_to_le32(sn), reason, ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must not be called with mpath->state_lock held;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);


	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}