drivers/net/wireless/iwlwifi/mvm/time-event.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/* A TimeUnit is 1024 microseconds */
#define TU_TO_JIFFIES(_tu)	(usecs_to_jiffies((_tu) * 1024))
#define MSEC_TO_TU(_msec)	((_msec) * 1000 / 1024)
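/*
 * Worked example of the conversions above (illustrative values only, not
 * taken from the original source): MSEC_TO_TU(500) = 500 * 1000 / 1024 =
 * 488 TU, and TU_TO_JIFFIES(488) maps those 488 * 1024 usec back to
 * jiffies, so a 500 ms interval is represented as roughly 488 time units.
 */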

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC

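/*
 * Reset a time event's bookkeeping and unlink it from the time event list.
 * Callers must hold mvm->time_event_lock; calling this on an entry that was
 * already cleared (id == TE_MAX) is a no-op.
 */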
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (te_data->id == TE_MAX)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}

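/*
 * Work item scheduled from iwl_mvm_roc_finished() when a remain-on-channel
 * time event ends or is cancelled: it synchronizes the TX path and then
 * flushes any frames still queued on the offchannel queue (see the comment
 * in the function body for the details).
 */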
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is cancelled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_tx_path(mvm, BIT(IWL_MVM_OFFCHANNEL_QUEUE), false);
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * First, clear the ROC_RUNNING status bit. This will cause the TX
	 * path to drop offchannel transmissions. That would also be done
	 * by mac80211, but it is racy, in particular in the case that the
	 * time event actually completed in the firmware (which is handled
	 * in iwl_mvm_te_handle_notif).
	 */
	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);

	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}

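/*
 * If a station interface's protection time event ended before association
 * completed (not associated yet, or no known DTIM period), report a
 * connection loss to mac80211. Returns true if a disconnect was triggered.
 */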
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return false;
	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);
	ieee80211_connection_loss(vif);
	return true;
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases that the scheduler cannot
	 * find a schedule that can handle the event (for example requesting
	 * P2P Device discoverability, while there are other higher priority
	 * events in the system).
	 */
	if (WARN_ONCE(!le32_to_cpu(notif->status),
		      "Failed to schedule time event\n")) {
		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
		}

		/*
		 * By now, we should have finished association
		 * and know the dtim period.
		 */
		iwl_mvm_te_check_disconnect(mvm, te_data->vif,
			"No association and the time event is over already...");
		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = jiffies +
			TU_TO_JIFFIES(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			ieee80211_ready_on_channel(mvm->hw);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}

/*
 * The Rx handler for time event notifications
 */
int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				struct iwl_rx_cmd_buffer *rxb,
				struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
	spin_unlock_bh(&mvm->time_event_lock);

	return 0;
}

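/*
 * Notification-wait callback for the TIME_EVENT_CMD response. It validates
 * the response and records the unique ID the firmware assigned to the time
 * event, so the driver can match later start/end notifications against it.
 */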
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}

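/*
 * Add a time event: link te_data into the time event list, send the
 * TIME_EVENT_CMD and pick up the firmware-assigned UID from the response via
 * the notification-wait mechanism (see the comment in the body for why
 * CMD_WANT_SKB is not used here). On failure the entry is cleared again.
 */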
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u8 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}

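/*
 * Schedule (or extend) the time event that keeps the device on the BSS
 * channel while association completes. If a running event still covers at
 * least min_duration, nothing is done; otherwise the old event is removed
 * and a new one of 'duration' TUs is added.
 */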
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies,
		       jiffies + TU_TO_JIFFIES(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * We don't have enough time left, so cancel the current TE
		 * and issue a new one. Of course it would be better to remove
		 * the old one only when the new one is added, but we don't
		 * care if we are off channel for a bit. All we need to do is
		 * not return before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time =
		cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));

	time_cmd.dep_policy = TE_INDEPENDENT;
	time_cmd.is_present = cpu_to_le32(1);
	time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
	time_cmd.max_delay = cpu_to_le32(500);
	/*
	 * TODO: why do we need to set the interval to the beacon interval
	 * if it is not periodic?
	 */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = cpu_to_le32(1);
	time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
				      TE_NOTIF_HOST_EVENT_END);

	iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

/*
 * Explicit request to remove a time event. The removal of a time event needs
 * to be synchronized with the flow of a time event's end notification, which
 * also removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 id, uid;
	int ret;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already
	 * removed.
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and been removed. In such a case there is no need
	 * to send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
		return;
	}

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
				   sizeof(time_cmd), &time_cmd);
	if (WARN_ON(ret))
		return;
}

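/*
 * Stop the session-protection time event for this interface, i.e. remove the
 * TE_BSS_STA_AGGRESSIVE_ASSOC event added by iwl_mvm_protect_session().
 */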
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;

	lockdep_assert_held(&mvm->mutex);
	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

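/*
 * Start a remain-on-channel session for a P2P_DEVICE interface by adding a
 * time event whose type depends on the ROC type (P2P device discoverability
 * for a normal ROC, P2P client association for a mgmt-TX ROC). 'duration' is
 * given in milliseconds and converted to time units below.
 */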
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
	time_cmd.is_present = cpu_to_le32(1);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus they might not
	 * be scheduled. To improve the chances of being scheduled, allow them
	 * to be fragmented, and in addition allow them to be delayed.
	 */
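	/*
	 * Illustrative example (hypothetical numbers, not from the original
	 * source): for a 500 ms ROC request, MSEC_TO_TU(500) = 488 TU, so
	 * max_frags below is 488 / 20 = 24, max_delay is MSEC_TO_TU(250) =
	 * 244 TU and the duration is 488 TU.
	 */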
	time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = cpu_to_le32(1);
	time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
				      TE_NOTIF_HOST_EVENT_END);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

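/*
 * Stop the remain-on-channel session for the P2P_DEVICE interface: find its
 * time event on the list, remove it and complete the ROC cleanup.
 */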
void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time, and that this time event corresponds to a
	 * ROC request.
	 */
	mvmvif = NULL;
	spin_lock_bh(&mvm->time_event_lock);
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
			break;
		}
	}
	spin_unlock_bh(&mvm->time_event_lock);

	if (!mvmvif) {
		IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
		return;
	}

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);

	iwl_mvm_roc_finished(mvm);
}