/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

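/*
 * Illustrative sketch (not part of the original code): the discipline
 * described above, as the add/delete paths below apply it.  Node
 * add/delete takes the resize lock as *reader* so the table pointer
 * cannot be swapped underneath it, plus the per-bucket spinlock:
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock_bh(&tbl->hashwlock[hash_idx]);
 *	... add or remove an mpath_node in tbl->hash_buckets[hash_idx] ...
 *	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 *
 * Only the grow functions take the lock as writer; see
 * mesh_mpath_table_grow() below.
 */
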
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

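/*
 * Illustrative sketch (not part of the original code): since "tbl" is
 * expanded twice by the macro above, dereference into a local first:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i) {
 *		... inspect node->mpath ...
 *	}
 *	rcu_read_unlock();
 */
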
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}

/**
 *
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 *
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst,
		  skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
				     struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

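/*
 * Illustrative sketch (not part of the original code): a caller
 * resolves the next hop entirely inside the RCU read-side section and
 * must not keep the returned mpath after leaving it:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */
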
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	kfree(node);
}

/**
 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
 * @tbl: table which contains known_gates list
 * @mpath: mpath to known mesh gate
 *
 * Returns: 0 on success
 */
static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(tbl);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	tbl = rcu_dereference(tbl);

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			call_rcu(&gate->rcu, mesh_gate_node_reclaim);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_path_add_gate - mark the given mpath in our path table as a path to a
 * mesh gate
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	return mesh_gate_add(mesh_paths, mpath);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

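/*
 * Illustrative sketch (not part of the original code): a typical
 * caller might create the entry on demand, treat an existing path as
 * success, and then look the entry up again under RCU:
 *
 *	err = mesh_path_add(dst, sdata);
 *	if (err && err != -EEXIST)
 *		return err;
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	... start path discovery or queue the frame ...
 *	rcu_read_unlock();
 */
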
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

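/*
 * Note (editorial, not part of the original code): growth is deferred
 * to process context.  When mesh_path_add() observes that the mean
 * chain length was exceeded it sets MESH_WORK_GROW_MPATH_TABLE and
 * queues the interface work; the work handler is then expected to call
 * mesh_mpath_table_grow(), where taking pathtbl_resize_lock as writer
 * is safe.
 */
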
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock_bh(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock_bh(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both regular mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

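/*
 * Illustrative sketch (not part of the original code): on a failed
 * path discovery a caller might try the gates and discard the queued
 * frames only if no gate could take them:
 *
 *	if (mesh_path_send_to_gates(mpath))
 *		mesh_path_flush_pending(mpath);
 */
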
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			sn = ++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
		}
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				   cpu_to_le32(sn), reason, ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);


	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}