Commit | Line | Data |
---|---|---|
8ca151b5 JB |
1 | /****************************************************************************** |
2 | * | |
3 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
4 | * redistributing this file, you may do so under either license. | |
5 | * | |
6 | * GPL LICENSE SUMMARY | |
7 | * | |
51368bf7 | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
8ca151b5 JB |
9 | * |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of version 2 of the GNU General Public License as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but | |
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | * General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License | |
20 | * along with this program; if not, write to the Free Software | |
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | |
22 | * USA | |
23 | * | |
24 | * The full GNU General Public License is included in this distribution | |
410dc5aa | 25 | * in the file called COPYING. |
8ca151b5 JB |
26 | * |
27 | * Contact Information: | |
28 | * Intel Linux Wireless <ilw@linux.intel.com> | |
29 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
30 | * | |
31 | * BSD LICENSE | |
32 | * | |
51368bf7 | 33 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
8ca151b5 JB |
34 | * All rights reserved. |
35 | * | |
36 | * Redistribution and use in source and binary forms, with or without | |
37 | * modification, are permitted provided that the following conditions | |
38 | * are met: | |
39 | * | |
40 | * * Redistributions of source code must retain the above copyright | |
41 | * notice, this list of conditions and the following disclaimer. | |
42 | * * Redistributions in binary form must reproduce the above copyright | |
43 | * notice, this list of conditions and the following disclaimer in | |
44 | * the documentation and/or other materials provided with the | |
45 | * distribution. | |
46 | * * Neither the name Intel Corporation nor the names of its | |
47 | * contributors may be used to endorse or promote products derived | |
48 | * from this software without specific prior written permission. | |
49 | * | |
50 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
51 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
52 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
53 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
54 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
55 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
56 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
60 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
61 | * | |
62 | *****************************************************************************/ | |
63 | ||
64 | #include <linux/jiffies.h> | |
65 | #include <net/mac80211.h> | |
66 | ||
67 | #include "iwl-notif-wait.h" | |
68 | #include "iwl-trans.h" | |
69 | #include "fw-api.h" | |
70 | #include "time-event.h" | |
71 | #include "mvm.h" | |
72 | #include "iwl-io.h" | |
73 | #include "iwl-prph.h" | |
74 | ||
/*
 * A TimeUnit (TU) is 1024 microseconds, so one millisecond is roughly one TU.
 *
 * The argument and the intermediate product are fully parenthesized so that
 * expression arguments (e.g. MSEC_TO_TU(a + b)) evaluate correctly; the
 * original form `_msec*1000/1024` silently miscomputed for such arguments.
 */
#define MSEC_TO_TU(_msec)	(((_msec) * 1000) / 1024)
77 | ||
e635c797 IP |
/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
/* FW time event types used for the two mac80211 remain-on-channel types */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
456f6ddf | 84 | |
8ca151b5 JB |
85 | void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, |
86 | struct iwl_mvm_time_event_data *te_data) | |
87 | { | |
88 | lockdep_assert_held(&mvm->time_event_lock); | |
89 | ||
90 | if (te_data->id == TE_MAX) | |
91 | return; | |
92 | ||
93 | list_del(&te_data->list); | |
94 | te_data->running = false; | |
95 | te_data->uid = 0; | |
96 | te_data->id = TE_MAX; | |
97 | te_data->vif = NULL; | |
98 | } | |
99 | ||
/*
 * Work item scheduled by iwl_mvm_roc_finished() when a remain-on-channel
 * time event ends or is cancelled.  Synchronizes the TX path and then
 * flushes any frames that were still queued for the offchannel queue.
 */
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	/* ensure concurrent TX paths observe the cleared ROC state first */
	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is cancelled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_tx_path(mvm, BIT(IWL_MVM_OFFCHANNEL_QUEUE), false);
}
118 | ||
/*
 * Common teardown when a remain-on-channel period is over: drop the
 * ROC-running status (so new offchannel TX is rejected), release the
 * ROC reference, and schedule the cleanup work that flushes frames
 * that raced past the status check.
 */
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * First, clear the ROC_RUNNING status bit. This will cause the TX
	 * path to drop offchannel transmissions. That would also be done
	 * by mac80211, but it is racy, in particular in the case that the
	 * time event actually completed in the firmware (which is handled
	 * in iwl_mvm_te_handle_notif).
	 */
	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
	iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);

	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}
140 | ||
7f0a7c67 AO |
/*
 * Called when the CSA (channel switch announcement) NoA time event starts
 * on an AP/GO interface.  If the beacon countdown is complete, finalize
 * the channel switch via mac80211 and drop our csa_vif reference;
 * otherwise the switch will happen on the last TBTT.
 */
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	/* switch may have been cancelled/completed meanwhile */
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	/* NOTE(review): pointer is cleared after dropping the read lock;
	 * presumably the write side is protected elsewhere -- confirm */
	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}
175 | ||
05739794 JB |
176 | static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, |
177 | struct ieee80211_vif *vif, | |
178 | const char *errmsg) | |
179 | { | |
180 | if (vif->type != NL80211_IFTYPE_STATION) | |
181 | return false; | |
182 | if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) | |
183 | return false; | |
184 | if (errmsg) | |
185 | IWL_ERR(mvm, "%s\n", errmsg); | |
186 | ieee80211_connection_loss(vif); | |
187 | return true; | |
188 | } | |
189 | ||
8ca151b5 JB |
/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding the time event data.
 *
 * Called with mvm->time_event_lock held (asserted below).
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases that the scheduler cannot
	 * find a schedule that can handle the event (for example requesting a
	 * P2P Device discoveribility, while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		bool start = le32_to_cpu(notif->action) &
				TE_V2_NOTIF_HOST_EVENT_START;
		IWL_WARN(mvm, "Time Event %s notification failure\n",
			 start ? "start" : "end");
		/*
		 * A scheduling failure may leave association unable to
		 * complete; report connection loss and drop the event if so.
		 */
		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		/* a P2P_DEVICE time event ending means the ROC is over */
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
		}

		/*
		 * By now, we should have finished association
		 * and know the dtim period.
		 */
		iwl_mvm_te_check_disconnect(mvm, te_data->vif,
			"No association and the time event is over already...");
		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->vif->type == NL80211_IFTYPE_AP) {
			/* a failed CSA NoA start must not finalize the
			 * channel switch, hence the status re-check */
			if (le32_to_cpu(notif->status))
				iwl_mvm_csa_noa_start(mvm);
			else
				IWL_DEBUG_TE(mvm, "CSA NOA failed to start\n");

			/* we don't need it anymore */
			iwl_mvm_te_clear_data(mvm, te_data);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}
264 | ||
265 | /* | |
266 | * The Rx handler for time event notifications | |
267 | */ | |
268 | int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, | |
269 | struct iwl_rx_cmd_buffer *rxb, | |
270 | struct iwl_device_cmd *cmd) | |
271 | { | |
272 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | |
273 | struct iwl_time_event_notif *notif = (void *)pkt->data; | |
274 | struct iwl_mvm_time_event_data *te_data, *tmp; | |
275 | ||
276 | IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n", | |
277 | le32_to_cpu(notif->unique_id), | |
278 | le32_to_cpu(notif->action)); | |
279 | ||
280 | spin_lock_bh(&mvm->time_event_lock); | |
281 | list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) { | |
282 | if (le32_to_cpu(notif->unique_id) == te_data->uid) | |
283 | iwl_mvm_te_handle_notif(mvm, te_data, notif); | |
284 | } | |
285 | spin_unlock_bh(&mvm->time_event_lock); | |
286 | ||
287 | return 0; | |
288 | } | |
289 | ||
ffdf968d JB |
/*
 * Notification-wait callback for the TIME_EVENT_CMD response.  Extracts
 * the unique id the firmware assigned to the newly-added time event and
 * stores it in te_data->uid.
 *
 * Returning true ends the wait; returning false keeps the wait entry
 * active so a later matching response can still be processed (see the
 * id check below).
 */
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	/* malformed response: stop waiting, uid stays unset */
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}
8ca151b5 | 318 | |
ffdf968d JB |
/*
 * Register te_data in the time event list and send the ADD command to
 * the firmware, capturing the firmware-assigned UID from the command
 * response via a notification wait.
 *
 * Must be called with mvm->mutex held.  Returns 0 on success or a
 * negative error code; on failure te_data is cleared again.
 */
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u8 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	/* the entry must be free (id == TE_MAX) before it can be added */
	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	/* error path: undo the list insertion done above */
	if (ret) {
out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}
379 | ||
/*
 * Schedule a session-protection time event so that association can
 * complete without the device leaving the channel.
 *
 * @duration: requested protection duration, in TU
 * @min_duration: if a time event is already running with at least this
 *	much time left, keep it instead of issuing a new one
 * @max_delay: maximum scheduling delay the firmware may apply, in TU
 *
 * Must be called with mvm->mutex held.
 */
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	/* enough time left on the running event: nothing to do */
	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * we don't have enough time
		 * cancel the current TE and issue a new one
		 * Of course it would be better to remove the old one only
		 * when the new one is added, but we don't care if we are off
		 * channel for a bit. All we need to do, is not to return
		 * before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* start relative to the device's current timestamp */
	time_cmd.apply_time =
		cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
433 | ||
/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 id, uid;
	int ret;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and removed. In such a case there is no need to
	 * send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
		return;
	}

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(time_cmd), &time_cmd);
	if (WARN_ON(ret))
		return;
}
485 | ||
486 | void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, | |
487 | struct ieee80211_vif *vif) | |
488 | { | |
489 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | |
490 | struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; | |
491 | ||
492 | lockdep_assert_held(&mvm->mutex); | |
493 | iwl_mvm_remove_time_event(mvm, mvmvif, te_data); | |
494 | } | |
495 | ||
8ca151b5 | 496 | int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, |
e635c797 | 497 | int duration, enum ieee80211_roc_type type) |
8ca151b5 JB |
498 | { |
499 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | |
500 | struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; | |
a373f67c | 501 | struct iwl_time_event_cmd time_cmd = {}; |
8ca151b5 JB |
502 | |
503 | lockdep_assert_held(&mvm->mutex); | |
504 | if (te_data->running) { | |
505 | IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n"); | |
506 | return -EBUSY; | |
507 | } | |
508 | ||
509 | /* | |
510 | * Flush the done work, just in case it's still pending, so that | |
511 | * the work it does can complete and we can accept new frames. | |
512 | */ | |
513 | flush_work(&mvm->roc_done_wk); | |
514 | ||
8ca151b5 JB |
515 | time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); |
516 | time_cmd.id_and_color = | |
517 | cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); | |
e635c797 IP |
518 | |
519 | switch (type) { | |
520 | case IEEE80211_ROC_TYPE_NORMAL: | |
521 | time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL); | |
522 | break; | |
523 | case IEEE80211_ROC_TYPE_MGMT_TX: | |
524 | time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX); | |
525 | break; | |
526 | default: | |
527 | WARN_ONCE(1, "Got an invalid ROC type\n"); | |
528 | return -EINVAL; | |
529 | } | |
8ca151b5 JB |
530 | |
531 | time_cmd.apply_time = cpu_to_le32(0); | |
8ca151b5 JB |
532 | time_cmd.interval = cpu_to_le32(1); |
533 | ||
534 | /* | |
e635c797 | 535 | * The P2P Device TEs can have lower priority than other events |
8ca151b5 | 536 | * that are being scheduled by the driver/fw, and thus it might not be |
e635c797 IP |
537 | * scheduled. To improve the chances of it being scheduled, allow them |
538 | * to be fragmented, and in addition allow them to be delayed. | |
8ca151b5 | 539 | */ |
f8f03c3e | 540 | time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS); |
8ca151b5 JB |
541 | time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); |
542 | time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); | |
f8f03c3e EL |
543 | time_cmd.repeat = 1; |
544 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | | |
1f6bf078 EG |
545 | TE_V2_NOTIF_HOST_EVENT_END | |
546 | T2_V2_START_IMMEDIATELY); | |
8ca151b5 | 547 | |
ffdf968d | 548 | return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); |
8ca151b5 JB |
549 | } |
550 | ||
551 | void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm) | |
552 | { | |
553 | struct iwl_mvm_vif *mvmvif; | |
554 | struct iwl_mvm_time_event_data *te_data; | |
555 | ||
556 | lockdep_assert_held(&mvm->mutex); | |
557 | ||
558 | /* | |
559 | * Iterate over the list of time events and find the time event that is | |
560 | * associated with a P2P_DEVICE interface. | |
561 | * This assumes that a P2P_DEVICE interface can have only a single time | |
562 | * event at any given time and this time event coresponds to a ROC | |
563 | * request | |
564 | */ | |
565 | mvmvif = NULL; | |
566 | spin_lock_bh(&mvm->time_event_lock); | |
567 | list_for_each_entry(te_data, &mvm->time_event_list, list) { | |
568 | if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { | |
569 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); | |
570 | break; | |
571 | } | |
572 | } | |
573 | spin_unlock_bh(&mvm->time_event_lock); | |
574 | ||
575 | if (!mvmvif) { | |
576 | IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n"); | |
577 | return; | |
578 | } | |
579 | ||
580 | iwl_mvm_remove_time_event(mvm, mvmvif, te_data); | |
581 | ||
582 | iwl_mvm_roc_finished(mvm); | |
583 | } | |
7f0a7c67 AO |
584 | |
585 | int iwl_mvm_schedule_csa_noa(struct iwl_mvm *mvm, | |
586 | struct ieee80211_vif *vif, | |
587 | u32 duration, u32 apply_time) | |
588 | { | |
589 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | |
590 | struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; | |
591 | struct iwl_time_event_cmd time_cmd = {}; | |
592 | ||
593 | lockdep_assert_held(&mvm->mutex); | |
594 | ||
595 | if (te_data->running) { | |
596 | IWL_DEBUG_TE(mvm, "CS NOA is already scheduled\n"); | |
597 | return -EBUSY; | |
598 | } | |
599 | ||
600 | time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); | |
601 | time_cmd.id_and_color = | |
602 | cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); | |
603 | time_cmd.id = cpu_to_le32(TE_P2P_GO_CSA_NOA); | |
604 | time_cmd.apply_time = cpu_to_le32(apply_time); | |
605 | time_cmd.max_frags = TE_V2_FRAG_NONE; | |
606 | time_cmd.duration = cpu_to_le32(duration); | |
607 | time_cmd.repeat = 1; | |
608 | time_cmd.interval = cpu_to_le32(1); | |
609 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | | |
610 | TE_V2_ABSENCE); | |
611 | ||
612 | return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); | |
613 | } |