/*
 * net/mac80211/chan.c - mac80211 channel management
 */
4
5 #include <linux/nl80211.h>
6 #include <linux/export.h>
7 #include <linux/rtnetlink.h>
8 #include <net/cfg80211.h>
9 #include "ieee80211_i.h"
10 #include "driver-ops.h"
11
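/*
 * Map a station's current RX bandwidth to the corresponding nl80211
 * channel width, taking HT support into account for the 20 MHz case.
 */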
static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
{
        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        return NL80211_CHAN_WIDTH_20;
                else
                        return NL80211_CHAN_WIDTH_20_NOHT;
        case IEEE80211_STA_RX_BW_40:
                return NL80211_CHAN_WIDTH_40;
        case IEEE80211_STA_RX_BW_80:
                return NL80211_CHAN_WIDTH_80;
        case IEEE80211_STA_RX_BW_160:
                /*
                 * This applies to both 160 and 80+80. Since we use
                 * the returned value to consider degradation of
                 * ctx->conf.min_def, we have to make sure to take
                 * the bigger one (NL80211_CHAN_WIDTH_160).
                 * Otherwise we might try degrading even when not
                 * needed, as the max required sta_bw returned (80+80)
                 * might be smaller than the configured bw (160).
                 */
                return NL80211_CHAN_WIDTH_160;
        default:
                WARN_ON(1);
                return NL80211_CHAN_WIDTH_20;
        }
}

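/*
 * Return the widest bandwidth required by any uploaded station that
 * belongs to this interface (or shares its bss, as for AP/AP_VLAN).
 */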
static enum nl80211_chan_width
ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
{
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
        struct sta_info *sta;

        rcu_read_lock();
        list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
                if (sdata != sta->sdata &&
                    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
                        continue;

                if (!sta->uploaded)
                        continue;

                max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
        }
        rcu_read_unlock();

        return max_bw;
}

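/*
 * Return the widest bandwidth required by any interface bound to the
 * given channel context. A monitor interface counts with the context's
 * configured width.
 */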
static enum nl80211_chan_width
ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
                                      struct ieee80211_chanctx_conf *conf)
{
        struct ieee80211_sub_if_data *sdata;
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                struct ieee80211_vif *vif = &sdata->vif;
                enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;

                if (!ieee80211_sdata_running(sdata))
                        continue;

                if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
                        continue;

                switch (vif->type) {
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_AP_VLAN:
                        width = ieee80211_get_max_required_bw(sdata);
                        break;
                case NL80211_IFTYPE_P2P_DEVICE:
                        continue;
                case NL80211_IFTYPE_STATION:
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_MESH_POINT:
                        width = vif->bss_conf.chandef.width;
                        break;
                case NL80211_IFTYPE_UNSPECIFIED:
                case NUM_NL80211_IFTYPES:
                case NL80211_IFTYPE_MONITOR:
                case NL80211_IFTYPE_P2P_CLIENT:
                case NL80211_IFTYPE_P2P_GO:
                        WARN_ON_ONCE(1);
                }
                max_bw = max(max_bw, width);
        }

        /* use the configured bandwidth in case of monitor interface */
        sdata = rcu_dereference(local->monitor_sdata);
        if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
                max_bw = max(max_bw, conf->def.width);

        rcu_read_unlock();

        return max_bw;
}

/*
 * recalc the min required chan width of the channel context, which is
 * the max of min required widths of all the interfaces bound to this
 * channel context.
 */
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
                                      struct ieee80211_chanctx *ctx)
{
        enum nl80211_chan_width max_bw;
        struct cfg80211_chan_def min_def;

        lockdep_assert_held(&local->chanctx_mtx);

        /* don't optimize 5MHz, 10MHz, and radar_enabled confs */
        if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 ||
            ctx->conf.def.width == NL80211_CHAN_WIDTH_10 ||
            ctx->conf.radar_enabled) {
                ctx->conf.min_def = ctx->conf.def;
                return;
        }

        max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);

        /* downgrade chandef up to max_bw */
        min_def = ctx->conf.def;
        while (min_def.width > max_bw)
                ieee80211_chandef_downgrade(&min_def);

        if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
                return;

        ctx->conf.min_def = min_def;
        if (!ctx->driver_present)
                return;

        drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH);
}

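/*
 * Switch an existing channel context to a (compatible) new chandef and
 * notify the driver; without hardware chanctx support this also updates
 * the single operating channel.
 */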
static void ieee80211_change_chanctx(struct ieee80211_local *local,
                                     struct ieee80211_chanctx *ctx,
                                     const struct cfg80211_chan_def *chandef)
{
        if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
                return;

        WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));

        ctx->conf.def = *chandef;
        drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
        ieee80211_recalc_chanctx_min_def(local, ctx);

        if (!local->use_chanctx) {
                local->_oper_chandef = *chandef;
                ieee80211_hw_config(local, 0);
        }
}

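/*
 * Look for an existing, shareable channel context that is compatible
 * with the requested chandef. If one is found, it is adjusted to the
 * common compatible chandef before being returned.
 */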
static struct ieee80211_chanctx *
ieee80211_find_chanctx(struct ieee80211_local *local,
                       const struct cfg80211_chan_def *chandef,
                       enum ieee80211_chanctx_mode mode)
{
        struct ieee80211_chanctx *ctx;

        lockdep_assert_held(&local->chanctx_mtx);

        if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
                return NULL;

        list_for_each_entry(ctx, &local->chanctx_list, list) {
                const struct cfg80211_chan_def *compat;

                if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
                        continue;

                compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
                if (!compat)
                        continue;

                ieee80211_change_chanctx(local, ctx, compat);

                return ctx;
        }

        return NULL;
}

static bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
        struct ieee80211_sub_if_data *sdata;

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                if (sdata->radar_required) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

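/*
 * Allocate and register a new channel context for the given chandef.
 * Returns an ERR_PTR() on failure; the caller must hold both local->mtx
 * and local->chanctx_mtx.
 */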
static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
                      const struct cfg80211_chan_def *chandef,
                      enum ieee80211_chanctx_mode mode)
{
        struct ieee80211_chanctx *ctx;
        u32 changed;
        int err;

        lockdep_assert_held(&local->chanctx_mtx);

        ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->conf.def = *chandef;
        ctx->conf.rx_chains_static = 1;
        ctx->conf.rx_chains_dynamic = 1;
        ctx->mode = mode;
        ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
        ieee80211_recalc_chanctx_min_def(local, ctx);
        if (!local->use_chanctx)
                local->hw.conf.radar_enabled = ctx->conf.radar_enabled;

        /* we hold the mutex to prevent idle from changing */
        lockdep_assert_held(&local->mtx);
        /* turn idle off *before* setting channel -- some drivers need that */
        changed = ieee80211_idle_off(local);
        if (changed)
                ieee80211_hw_config(local, changed);

        if (!local->use_chanctx) {
                local->_oper_chandef = *chandef;
                ieee80211_hw_config(local, 0);
        } else {
                err = drv_add_chanctx(local, ctx);
                if (err) {
                        kfree(ctx);
                        ieee80211_recalc_idle(local);
                        return ERR_PTR(err);
                }
        }

        /* and keep the mutex held until the new chanctx is on the list */
        list_add_rcu(&ctx->list, &local->chanctx_list);

        return ctx;
}

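/*
 * Tear down a channel context that has no users left (refcount must be
 * zero): reset or remove it from the driver, drop it from the list and
 * recalculate the idle state.
 */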
static void ieee80211_free_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *ctx)
{
        bool check_single_channel = false;

        lockdep_assert_held(&local->chanctx_mtx);

        WARN_ON_ONCE(ctx->refcount != 0);

        if (!local->use_chanctx) {
                struct cfg80211_chan_def *chandef = &local->_oper_chandef;
                chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
                chandef->center_freq1 = chandef->chan->center_freq;
                chandef->center_freq2 = 0;

                /* NOTE: Disabling radar is only valid here for
                 * single channel context. To be sure, check it ...
                 */
                if (local->hw.conf.radar_enabled)
                        check_single_channel = true;
                local->hw.conf.radar_enabled = false;

                ieee80211_hw_config(local, 0);
        } else {
                drv_remove_chanctx(local, ctx);
        }

        list_del_rcu(&ctx->list);
        kfree_rcu(ctx, rcu_head);

        /* throw a warning if this wasn't the only channel context. */
        WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));

        ieee80211_recalc_idle(local);
}

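/*
 * Bind an interface to a channel context: tell the driver, publish the
 * conf pointer for RCU readers, bump the refcount and update tx power,
 * minimum width and idle state.
 */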
static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
                                        struct ieee80211_chanctx *ctx)
{
        struct ieee80211_local *local = sdata->local;
        int ret;

        lockdep_assert_held(&local->chanctx_mtx);

        ret = drv_assign_vif_chanctx(local, sdata, ctx);
        if (ret)
                return ret;

        rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
        ctx->refcount++;

        ieee80211_recalc_txpower(sdata);
        ieee80211_recalc_chanctx_min_def(local, ctx);
        sdata->vif.bss_conf.idle = false;

        if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
            sdata->vif.type != NL80211_IFTYPE_MONITOR)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);

        return 0;
}

static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
                                              struct ieee80211_chanctx *ctx)
{
        struct ieee80211_chanctx_conf *conf = &ctx->conf;
        struct ieee80211_sub_if_data *sdata;
        const struct cfg80211_chan_def *compat = NULL;

        lockdep_assert_held(&local->chanctx_mtx);

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {

                if (!ieee80211_sdata_running(sdata))
                        continue;
                if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
                        continue;

                if (!compat)
                        compat = &sdata->vif.bss_conf.chandef;

                compat = cfg80211_chandef_compatible(
                                &sdata->vif.bss_conf.chandef, compat);
                if (!compat)
                        break;
        }
        rcu_read_unlock();

        if (WARN_ON_ONCE(!compat))
                return;

        ieee80211_change_chanctx(local, ctx, compat);
}

static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
                                           struct ieee80211_chanctx *chanctx)
{
        bool radar_enabled;

        lockdep_assert_held(&local->chanctx_mtx);
        /* for setting local->radar_detect_enabled */
        lockdep_assert_held(&local->mtx);

        radar_enabled = ieee80211_is_radar_required(local);

        if (radar_enabled == chanctx->conf.radar_enabled)
                return;

        chanctx->conf.radar_enabled = radar_enabled;
        local->radar_detect_enabled = chanctx->conf.radar_enabled;

        if (!local->use_chanctx) {
                local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
        }

        drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}

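/*
 * Undo ieee80211_assign_vif_chanctx(): drop the refcount, clear the RCU
 * pointer, notify the driver and, if other users remain, recompute the
 * context's chantype, SMPS, radar and minimum-width state.
 */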
static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
                                           struct ieee80211_chanctx *ctx)
{
        struct ieee80211_local *local = sdata->local;

        lockdep_assert_held(&local->chanctx_mtx);

        ctx->refcount--;
        rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);

        sdata->vif.bss_conf.idle = true;

        if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
            sdata->vif.type != NL80211_IFTYPE_MONITOR)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);

        drv_unassign_vif_chanctx(local, sdata, ctx);

        if (ctx->refcount > 0) {
                ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
                ieee80211_recalc_smps_chanctx(local, ctx);
                ieee80211_recalc_radar_chanctx(local, ctx);
                ieee80211_recalc_chanctx_min_def(local, ctx);
        }
}

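/*
 * Release the channel context used by this interface and free it if
 * this was the last user. Callers must hold local->chanctx_mtx.
 */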
static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;

        lockdep_assert_held(&local->chanctx_mtx);

        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf)
                return;

        ctx = container_of(conf, struct ieee80211_chanctx, conf);

        ieee80211_unassign_vif_chanctx(sdata, ctx);
        if (ctx->refcount == 0)
                ieee80211_free_chanctx(local, ctx);
}

void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx)
{
        struct ieee80211_sub_if_data *sdata;
        u8 rx_chains_static, rx_chains_dynamic;

        lockdep_assert_held(&local->chanctx_mtx);

        rx_chains_static = 1;
        rx_chains_dynamic = 1;

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                u8 needed_static, needed_dynamic;

                if (!ieee80211_sdata_running(sdata))
                        continue;

                if (rcu_access_pointer(sdata->vif.chanctx_conf) !=
                                                &chanctx->conf)
                        continue;

                switch (sdata->vif.type) {
                case NL80211_IFTYPE_P2P_DEVICE:
                        continue;
                case NL80211_IFTYPE_STATION:
                        if (!sdata->u.mgd.associated)
                                continue;
                        break;
                case NL80211_IFTYPE_AP_VLAN:
                        continue;
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_MESH_POINT:
                        break;
                default:
                        WARN_ON_ONCE(1);
                }

                switch (sdata->smps_mode) {
                default:
                        WARN_ONCE(1, "Invalid SMPS mode %d\n",
                                  sdata->smps_mode);
                        /* fall through */
                case IEEE80211_SMPS_OFF:
                        needed_static = sdata->needed_rx_chains;
                        needed_dynamic = sdata->needed_rx_chains;
                        break;
                case IEEE80211_SMPS_DYNAMIC:
                        needed_static = 1;
                        needed_dynamic = sdata->needed_rx_chains;
                        break;
                case IEEE80211_SMPS_STATIC:
                        needed_static = 1;
                        needed_dynamic = 1;
                        break;
                }

                rx_chains_static = max(rx_chains_static, needed_static);
                rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
        }
        rcu_read_unlock();

        if (!local->use_chanctx) {
                if (rx_chains_static > 1)
                        local->smps_mode = IEEE80211_SMPS_OFF;
                else if (rx_chains_dynamic > 1)
                        local->smps_mode = IEEE80211_SMPS_DYNAMIC;
                else
                        local->smps_mode = IEEE80211_SMPS_STATIC;
                ieee80211_hw_config(local, 0);
        }

        if (rx_chains_static == chanctx->conf.rx_chains_static &&
            rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
                return;

        chanctx->conf.rx_chains_static = rx_chains_static;
        chanctx->conf.rx_chains_dynamic = rx_chains_dynamic;
        drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS);
}

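/*
 * Attach an interface to a (new or shared) channel context for the
 * given chandef, releasing any context it was previously using.
 */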
int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                              const struct cfg80211_chan_def *chandef,
                              enum ieee80211_chanctx_mode mode)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx *ctx;
        int ret;

        lockdep_assert_held(&local->mtx);

        WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));

        mutex_lock(&local->chanctx_mtx);
        __ieee80211_vif_release_channel(sdata);

        ctx = ieee80211_find_chanctx(local, chandef, mode);
        if (!ctx)
                ctx = ieee80211_new_chanctx(local, chandef, mode);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto out;
        }

        sdata->vif.bss_conf.chandef = *chandef;

        ret = ieee80211_assign_vif_chanctx(sdata, ctx);
        if (ret) {
                /* if assign fails refcount stays the same */
                if (ctx->refcount == 0)
                        ieee80211_free_chanctx(local, ctx);
                goto out;
        }

        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
 out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
}

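/*
 * Switch the interface's channel context to the CSA target chandef.
 * Only valid while a channel switch is active and while this interface
 * is the sole user of its context.
 */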
int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
                                 u32 *changed)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
        const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
        int ret;
        u32 chanctx_changed = 0;

        lockdep_assert_held(&local->mtx);

        /* should never be called if not performing a channel switch. */
        if (WARN_ON(!sdata->vif.csa_active))
                return -EINVAL;

        if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
                                     IEEE80211_CHAN_DISABLED))
                return -EINVAL;

        mutex_lock(&local->chanctx_mtx);
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(conf, struct ieee80211_chanctx, conf);
        if (ctx->refcount != 1) {
                ret = -EINVAL;
                goto out;
        }

        if (sdata->vif.bss_conf.chandef.width != chandef->width) {
                chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
                *changed |= BSS_CHANGED_BANDWIDTH;
        }

        sdata->vif.bss_conf.chandef = *chandef;
        ctx->conf.def = *chandef;

        chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
        drv_change_chanctx(local, ctx, chanctx_changed);

        ieee80211_recalc_chanctx_chantype(local, ctx);
        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
        ieee80211_recalc_chanctx_min_def(local, ctx);

        ret = 0;
 out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
}

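/*
 * Change only the bandwidth of the interface's chandef. Neither the new
 * nor the current chandef may be 20 MHz no-HT, and the new chandef must
 * be compatible with the channel context already in use.
 */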
int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                                   const struct cfg80211_chan_def *chandef,
                                   u32 *changed)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
        int ret;

        if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
                                     IEEE80211_CHAN_DISABLED))
                return -EINVAL;

        mutex_lock(&local->chanctx_mtx);
        if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) {
                ret = 0;
                goto out;
        }

        if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
            sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
                ret = -EINVAL;
                goto out;
        }

        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(conf, struct ieee80211_chanctx, conf);
        if (!cfg80211_chandef_compatible(&conf->def, chandef)) {
                ret = -EINVAL;
                goto out;
        }

        sdata->vif.bss_conf.chandef = *chandef;

        ieee80211_recalc_chanctx_chantype(local, ctx);

        *changed |= BSS_CHANGED_BANDWIDTH;
        ret = 0;
 out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
}

void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
{
        WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));

        lockdep_assert_held(&sdata->local->mtx);

        mutex_lock(&sdata->local->chanctx_mtx);
        __ieee80211_vif_release_channel(sdata);
        mutex_unlock(&sdata->local->chanctx_mtx);
}

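/*
 * Make an AP_VLAN interface share the channel context pointer of the
 * AP interface it belongs to.
 */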
void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *ap;
        struct ieee80211_chanctx_conf *conf;

        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
                return;

        ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);

        mutex_lock(&local->chanctx_mtx);

        conf = rcu_dereference_protected(ap->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
        mutex_unlock(&local->chanctx_mtx);
}

void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
                                         bool clear)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *vlan;
        struct ieee80211_chanctx_conf *conf;

        ASSERT_RTNL();

        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
                return;

        mutex_lock(&local->chanctx_mtx);

        /*
         * Check that conf exists, even when clearing. This function
         * must be called with the AP's channel context still there,
         * as it would otherwise cause VLANs to have an invalid
         * channel context pointer for a while, possibly pointing
         * to a channel context that has already been freed.
         */
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        WARN_ON(!conf);

        if (clear)
                conf = NULL;

        list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
                rcu_assign_pointer(vlan->vif.chanctx_conf, conf);

        mutex_unlock(&local->chanctx_mtx);
}

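/*
 * Driver-facing helper: invoke @iter for every channel context that the
 * driver currently knows about, under RCU (so @iter must be atomic).
 */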
void ieee80211_iter_chan_contexts_atomic(
        struct ieee80211_hw *hw,
        void (*iter)(struct ieee80211_hw *hw,
                     struct ieee80211_chanctx_conf *chanctx_conf,
                     void *data),
        void *iter_data)
{
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_chanctx *ctx;

        rcu_read_lock();
        list_for_each_entry_rcu(ctx, &local->chanctx_list, list)
                if (ctx->driver_present)
                        iter(hw, &ctx->conf, iter_data);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic);