author		Javier Cardona <javier@cozybit.com>	2009-08-10 12:15:52 -0700
committer	John W. Linville <linville@tuxdriver.com>	2009-08-14 09:14:01 -0400
commit		18889231e4527dfe23145efe318e74744794a95d
tree		bcb509dab37d80f6dbbfb6671a530aa882c9975e /net/mac80211/mesh_pathtbl.c
parent		5b365834255d7c90fc724b032c814dfa297aacf9
mac80211: Move mpath and mpp growth to mesh workqueue.
This prevents calling synchronize_rcu() from within the tx path by
moving the table growth code to the mesh workqueue.

Move mesh_table_free and mesh_table_grow from mesh.c to mesh_pathtbl.c
and declare them static.

Also, re-enable mesh in Kconfig and update the configuration
description.

Signed-off-by: Javier Cardona <javier@cozybit.com>
Tested-by: Andrey Yurovsky <andrey@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
 net/mac80211/mesh_pathtbl.c | 146 ++++++++++++++++++++++++++++----------
 1 file changed, 110 insertions(+), 36 deletions(-)
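
The diffstat is limited to mesh_pathtbl.c, so the consumer of the two new
work flags is not shown on this page. In this commit it lives in the mesh
work handler in mesh.c; a sketch of its likely shape (surrounding code
omitted, not reproduced from this diff):

/* Dispatch in ieee80211_mesh_work() (the mesh.c side of this commit);
 * a sketch only, since that file is outside this diffstat. */
if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
	mesh_mpath_table_grow();

if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
	mesh_mpp_table_grow();
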
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 431865a58622..751c4d0e2b36 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -39,6 +39,69 @@ static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
+static void __mesh_table_free(struct mesh_table *tbl)
+{
+ kfree(tbl->hash_buckets);
+ kfree(tbl->hashwlock);
+ kfree(tbl);
+}
+
+void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+{
+ struct hlist_head *mesh_hash;
+ struct hlist_node *p, *q;
+ int i;
+
+ mesh_hash = tbl->hash_buckets;
+ for (i = 0; i <= tbl->hash_mask; i++) {
+ spin_lock(&tbl->hashwlock[i]);
+ hlist_for_each_safe(p, q, &mesh_hash[i]) {
+ tbl->free_node(p, free_leafs);
+ atomic_dec(&tbl->entries);
+ }
+ spin_unlock(&tbl->hashwlock[i]);
+ }
+ __mesh_table_free(tbl);
+}
+
+static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
+{
+ struct mesh_table *newtbl;
+ struct hlist_head *oldhash;
+ struct hlist_node *p, *q;
+ int i;
+
+ if (atomic_read(&tbl->entries)
+ < tbl->mean_chain_len * (tbl->hash_mask + 1))
+ goto endgrow;
+
+ newtbl = mesh_table_alloc(tbl->size_order + 1);
+ if (!newtbl)
+ goto endgrow;
+
+ newtbl->free_node = tbl->free_node;
+ newtbl->mean_chain_len = tbl->mean_chain_len;
+ newtbl->copy_node = tbl->copy_node;
+ atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
+
+ oldhash = tbl->hash_buckets;
+ for (i = 0; i <= tbl->hash_mask; i++)
+ hlist_for_each(p, &oldhash[i])
+ if (tbl->copy_node(p, newtbl) < 0)
+ goto errcopy;
+
+ return newtbl;
+
+errcopy:
+ for (i = 0; i <= newtbl->hash_mask; i++) {
+ hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
+ tbl->free_node(p, 0);
+ }
+ __mesh_table_free(newtbl);
+endgrow:
+ return NULL;
+}
+
/* This lock will have the grow table function as writer and add / delete nodes
* as readers. When reading the table (i.e. doing lookups) we are well protected
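
mesh_table_grow() above only proceeds once the table holds mean_chain_len
nodes per bucket on average. A minimal standalone sketch of that load-factor
test, using toy types rather than the kernel's:

#include <stdbool.h>

struct toy_table {
	int entries;        /* nodes currently in the table */
	int hash_mask;      /* buckets - 1; bucket count is a power of two */
	int mean_chain_len; /* target average nodes per bucket */
};

/* Mirrors the early-out in mesh_table_grow(): grow only when the
 * average chain length would otherwise exceed mean_chain_len. */
static bool toy_needs_grow(const struct toy_table *t)
{
	return t->entries >= t->mean_chain_len * (t->hash_mask + 1);
}
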
@@ -187,6 +250,8 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
*/
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
@@ -195,8 +260,6 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- might_sleep();
-
if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -208,11 +271,11 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
return -ENOSPC;
err = -ENOMEM;
- new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+ new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
if (!new_mpath)
goto err_path_alloc;
- new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+ new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (!new_node)
goto err_node_alloc;
@@ -250,20 +313,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
spin_unlock(&mesh_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock);
if (grow) {
- struct mesh_table *oldtbl, *newtbl;
-
- write_lock(&pathtbl_resize_lock);
- oldtbl = mesh_paths;
- newtbl = mesh_table_grow(mesh_paths);
- if (!newtbl) {
- write_unlock(&pathtbl_resize_lock);
- return 0;
- }
- rcu_assign_pointer(mesh_paths, newtbl);
- write_unlock(&pathtbl_resize_lock);
-
- synchronize_rcu();
- mesh_table_free(oldtbl, false);
+ set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
}
return 0;
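
With the synchronize_rcu() call deferred to the workqueue, mesh_path_add()
may now run in atomic context, which is why the hunks above also drop
might_sleep() and switch both allocations to GFP_ATOMIC. A minimal sketch of
the non-sleeping allocation pattern, with a toy struct rather than mac80211's:

#include <linux/if_ether.h>
#include <linux/slab.h>
#include <linux/types.h>

struct toy_entry {
	u8 dst[ETH_ALEN];
};

/* GFP_ATOMIC never sleeps, so this is safe in softirq/tx context;
 * unlike GFP_KERNEL it cannot block to reclaim memory, so failure
 * must be handled, as mesh_path_add() does by returning -ENOMEM. */
static struct toy_entry *toy_entry_alloc(void)
{
	return kzalloc(sizeof(struct toy_entry), GFP_ATOMIC);
}
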
@@ -278,9 +329,46 @@ err_path_alloc:
return err;
}
+void mesh_mpath_table_grow(void)
+{
+ struct mesh_table *oldtbl, *newtbl;
+
+ write_lock(&pathtbl_resize_lock);
+ oldtbl = mesh_paths;
+ newtbl = mesh_table_grow(mesh_paths);
+ if (!newtbl) {
+ write_unlock(&pathtbl_resize_lock);
+ return;
+ }
+ rcu_assign_pointer(mesh_paths, newtbl);
+ write_unlock(&pathtbl_resize_lock);
+
+ synchronize_rcu();
+ mesh_table_free(oldtbl, false);
+}
+
+void mesh_mpp_table_grow(void)
+{
+ struct mesh_table *oldtbl, *newtbl;
+
+ write_lock(&pathtbl_resize_lock);
+ oldtbl = mpp_paths;
+ newtbl = mesh_table_grow(mpp_paths);
+ if (!newtbl) {
+ write_unlock(&pathtbl_resize_lock);
+ return;
+ }
+ rcu_assign_pointer(mpp_paths, newtbl);
+ write_unlock(&pathtbl_resize_lock);
+
+ synchronize_rcu();
+ mesh_table_free(oldtbl, false);
+}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
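
mesh_mpath_table_grow() and mesh_mpp_table_grow() above differ only in which
global table pointer they swap, so they could in principle share one helper.
A hypothetical refactor, not part of this commit:

/* Hypothetical helper, not in this commit: the common grow-and-swap
 * sequence, parameterized on the table pointer being replaced. */
static void mesh_table_grow_and_swap(struct mesh_table **tblp)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock(&pathtbl_resize_lock);
	oldtbl = *tblp;
	newtbl = mesh_table_grow(oldtbl);
	if (!newtbl) {
		write_unlock(&pathtbl_resize_lock);
		return;
	}
	rcu_assign_pointer(*tblp, newtbl);	/* publish to RCU readers */
	write_unlock(&pathtbl_resize_lock);

	synchronize_rcu();		/* sleeps: fine on the workqueue */
	mesh_table_free(oldtbl, false);	/* free old buckets, keep leafs */
}
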
@@ -289,8 +377,6 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- might_sleep();
-
if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -299,11 +385,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
return -ENOTSUPP;
err = -ENOMEM;
- new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+ new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
if (!new_mpath)
goto err_path_alloc;
- new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+ new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (!new_node)
goto err_node_alloc;
@@ -337,20 +423,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
spin_unlock(&mpp_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock);
if (grow) {
- struct mesh_table *oldtbl, *newtbl;
-
- write_lock(&pathtbl_resize_lock);
- oldtbl = mpp_paths;
- newtbl = mesh_table_grow(mpp_paths);
- if (!newtbl) {
- write_unlock(&pathtbl_resize_lock);
- return 0;
- }
- rcu_assign_pointer(mpp_paths, newtbl);
- write_unlock(&pathtbl_resize_lock);
-
- synchronize_rcu();
- mesh_table_free(oldtbl, false);
+ set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
}
return 0;
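
The reason both add functions must now be atomic-safe is that the tx path
resolves next hops under rcu_read_lock() and may create paths on demand. A
hypothetical caller sketch, with names simplified rather than taken from
mac80211's actual tx code:

/* Hypothetical caller, not mac80211's actual tx code: resolution runs
 * under rcu_read_lock(), so mesh_path_add() must never sleep, and the
 * table growth it triggers has to be deferred as this patch does. */
static int toy_tx_resolve(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	int err = 0;

	rcu_read_lock();
	if (!mesh_path_lookup(dst, sdata))
		err = mesh_path_add(dst, sdata);	/* atomic context */
	rcu_read_unlock();

	return err;
}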