/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

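/*
 * Usage sketch (editor's illustration, not from the original file): since
 * "tbl" is expanded twice, dereference once into a local variable and
 * iterate under RCU:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		inspect(node->mpath);	(inspect() is a hypothetical consumer)
 *	rcu_read_unlock();
 */
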
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

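/*
 * Caller sketch (editor's illustration, not from the original file): the
 * returned mpath is only guaranteed valid inside the RCU read section that
 * covered the lookup:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */
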
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx,
				struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

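/*
 * Caller sketch (editor's illustration, not from the original file): path
 * discovery code would typically add the entry and then re-look it up under
 * RCU; -EEXIST is not fatal since the path is already known:
 *
 *	err = mesh_path_add(dst, sdata);
 *	if (err && err != -EEXIST)
 *		return err;
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	...
 *	rcu_read_unlock();
 */
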
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

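/*
 * Caller sketch (editor's illustration, not from the original file): a rate
 * control algorithm might report a failed peer link after repeated tx
 * failures on frames destined to that sta:
 *
 *	if (failed_frames > FAIL_THRESHOLD)	(hypothetical threshold)
 *		mesh_plink_broken(sta);
 */
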
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	if (node->mpath->timer.function)
		del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&tbl->entries);
			spin_unlock(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				cpu_to_le32(sn), reason, ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must not be called holding mpath->state_lock;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

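/*
 * Caller sketch (editor's illustration, not from the original file):
 * pinning a path to a user-chosen next hop, roughly what a cfg80211
 * change-mpath handler would do:
 *
 *	rcu_read_lock();
 *	sta = sta_info_get(sdata, next_hop_addr);
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (sta && mpath)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */
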
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		if (mpath->timer.function)
			del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		mesh_table_free(tbl_path, true);
		return -ENOMEM;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_raw(mesh_paths), true);
	mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}