1 /******************************************************************************
2 *
3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26 #include <linux/kernel.h>
27 #include <linux/skbuff.h>
28 #include <linux/slab.h>
29 #include <net/mac80211.h>
30
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/delay.h>
34
35 #include <linux/workqueue.h>
36 #include "rs.h"
37 #include "fw-api.h"
38 #include "sta.h"
39 #include "iwl-op-mode.h"
40 #include "mvm.h"
41
42 #define RS_NAME "iwl-mvm-rs"
43
44 #define NUM_TRY_BEFORE_ANT_TOGGLE 1
45 #define RS_LEGACY_RETRIES_PER_RATE 1
46 #define RS_HT_VHT_RETRIES_PER_RATE 2
47 #define RS_HT_VHT_RETRIES_PER_RATE_TW 1
48 #define RS_INITIAL_MIMO_NUM_RATES 3
49 #define RS_INITIAL_SISO_NUM_RATES 3
50 #define RS_INITIAL_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
51 #define RS_SECONDARY_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
52 #define RS_SECONDARY_SISO_NUM_RATES 3
53 #define RS_SECONDARY_SISO_RETRIES 1
54
55 #define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
56 #define IWL_RATE_MIN_FAILURE_TH 3 /* min failures to calc tpt */
57 #define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
58
59 /* max allowed rate miss before sync LQ cmd */
60 #define IWL_MISSED_RATE_MAX 15
61 #define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
62
63
64 static u8 rs_ht_to_legacy[] = {
65 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
66 [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
67 [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
68 [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
69 [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
70 [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
71 [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
72 [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
73 [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
74 [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
75 };
76
77 static const u8 ant_toggle_lookup[] = {
78 [ANT_NONE] = ANT_NONE,
79 [ANT_A] = ANT_B,
80 [ANT_B] = ANT_C,
81 [ANT_AB] = ANT_BC,
82 [ANT_C] = ANT_A,
83 [ANT_AC] = ANT_AB,
84 [ANT_BC] = ANT_AC,
85 [ANT_ABC] = ANT_ABC,
86 };
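/*
 * Illustrative walk of the lookup above, assuming a device with only
 * antennas A and B connected (valid antennas == ANT_AB): rs_toggle_antenna()
 * below steps A -> B directly, while B -> C is rejected as invalid and the
 * walk continues C -> A, so the net effect is a plain A <-> B toggle.
 */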
87
88 #define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
89 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
90 IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
91 IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
92 IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
93 IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
94 IWL_RATE_##rp##M_INDEX, \
95 IWL_RATE_##rn##M_INDEX }
96
97 #define IWL_DECLARE_MCS_RATE(s) \
98 [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \
99 IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
100 IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
101 IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
102 IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
103 IWL_RATE_INVM_INDEX, \
104 IWL_RATE_INVM_INDEX }
105
106 /*
107 * Parameter order:
108 * rate, MCS number (used for the HT and VHT PLCPs), prev rate, next rate
109 *
110 * If there isn't a valid next or previous rate then INV is used which
111 * maps to IWL_RATE_INVALID
112 *
113 */
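/*
 * For example, the entry IWL_DECLARE_RATE_INFO(12, 1, 11, 18) in the table
 * below expands to:
 *
 * [IWL_RATE_12M_INDEX] = { IWL_RATE_12M_PLCP,
 *                          IWL_RATE_HT_SISO_MCS_1_PLCP,
 *                          IWL_RATE_HT_MIMO2_MCS_1_PLCP,
 *                          IWL_RATE_VHT_SISO_MCS_1_PLCP,
 *                          IWL_RATE_VHT_MIMO2_MCS_1_PLCP,
 *                          IWL_RATE_11M_INDEX,
 *                          IWL_RATE_18M_INDEX }
 *
 * i.e. the legacy PLCP, the HT/VHT SISO and MIMO2 PLCPs of the matching MCS,
 * and the previous/next legacy rate indices.
 */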
114 static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
115 IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */
116 IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
117 IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /* 5.5mbps */
118 IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
119 IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */
120 IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */
121 IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */
122 IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */
123 IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */
124 IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */
125 IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */
126 IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */
127 IWL_DECLARE_MCS_RATE(7), /* MCS 7 */
128 IWL_DECLARE_MCS_RATE(8), /* MCS 8 */
129 IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
130 };
131
132 enum rs_action {
133 RS_ACTION_STAY = 0,
134 RS_ACTION_DOWNSCALE = -1,
135 RS_ACTION_UPSCALE = 1,
136 };
137
138 enum rs_column_mode {
139 RS_INVALID = 0,
140 RS_LEGACY,
141 RS_SISO,
142 RS_MIMO2,
143 };
144
145 #define MAX_NEXT_COLUMNS 5
146 #define MAX_COLUMN_CHECKS 3
147
148 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
149 struct ieee80211_sta *sta,
150 struct iwl_scale_tbl_info *tbl);
151
152 struct rs_tx_column {
153 enum rs_column_mode mode;
154 u8 ant;
155 bool sgi;
156 enum rs_column next_columns[MAX_NEXT_COLUMNS];
157 allow_column_func_t checks[MAX_COLUMN_CHECKS];
158 };
159
160 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
161 struct iwl_scale_tbl_info *tbl)
162 {
163 if (!sta->ht_cap.ht_supported)
164 return false;
165
166 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
167 return false;
168
169 if (num_of_ant(mvm->fw->valid_tx_ant) < 2)
170 return false;
171
172 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
173 return false;
174
175 return true;
176 }
177
178 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
179 struct iwl_scale_tbl_info *tbl)
180 {
181 if (!sta->ht_cap.ht_supported)
182 return false;
183
184 return true;
185 }
186
187 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
188 struct iwl_scale_tbl_info *tbl)
189 {
190 struct rs_rate *rate = &tbl->rate;
191 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
192 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
193
194 if (is_ht20(rate) && (ht_cap->cap &
195 IEEE80211_HT_CAP_SGI_20))
196 return true;
197 if (is_ht40(rate) && (ht_cap->cap &
198 IEEE80211_HT_CAP_SGI_40))
199 return true;
200 if (is_ht80(rate) && (vht_cap->cap &
201 IEEE80211_VHT_CAP_SHORT_GI_80))
202 return true;
203
204 return false;
205 }
206
207 static const struct rs_tx_column rs_tx_columns[] = {
208 [RS_COLUMN_LEGACY_ANT_A] = {
209 .mode = RS_LEGACY,
210 .ant = ANT_A,
211 .next_columns = {
212 RS_COLUMN_LEGACY_ANT_B,
213 RS_COLUMN_SISO_ANT_A,
214 RS_COLUMN_SISO_ANT_B,
215 RS_COLUMN_MIMO2,
216 RS_COLUMN_MIMO2_SGI,
217 },
218 },
219 [RS_COLUMN_LEGACY_ANT_B] = {
220 .mode = RS_LEGACY,
221 .ant = ANT_B,
222 .next_columns = {
223 RS_COLUMN_LEGACY_ANT_A,
224 RS_COLUMN_SISO_ANT_A,
225 RS_COLUMN_SISO_ANT_B,
226 RS_COLUMN_MIMO2,
227 RS_COLUMN_MIMO2_SGI,
228 },
229 },
230 [RS_COLUMN_SISO_ANT_A] = {
231 .mode = RS_SISO,
232 .ant = ANT_A,
233 .next_columns = {
234 RS_COLUMN_SISO_ANT_B,
235 RS_COLUMN_MIMO2,
236 RS_COLUMN_SISO_ANT_A_SGI,
237 RS_COLUMN_SISO_ANT_B_SGI,
238 RS_COLUMN_MIMO2_SGI,
239 },
240 .checks = {
241 rs_siso_allow,
242 },
243 },
244 [RS_COLUMN_SISO_ANT_B] = {
245 .mode = RS_SISO,
246 .ant = ANT_B,
247 .next_columns = {
248 RS_COLUMN_SISO_ANT_A,
249 RS_COLUMN_MIMO2,
250 RS_COLUMN_SISO_ANT_B_SGI,
251 RS_COLUMN_SISO_ANT_A_SGI,
252 RS_COLUMN_MIMO2_SGI,
253 },
254 .checks = {
255 rs_siso_allow,
256 },
257 },
258 [RS_COLUMN_SISO_ANT_A_SGI] = {
259 .mode = RS_SISO,
260 .ant = ANT_A,
261 .sgi = true,
262 .next_columns = {
263 RS_COLUMN_SISO_ANT_B_SGI,
264 RS_COLUMN_MIMO2_SGI,
265 RS_COLUMN_SISO_ANT_A,
266 RS_COLUMN_SISO_ANT_B,
267 RS_COLUMN_MIMO2,
268 },
269 .checks = {
270 rs_siso_allow,
271 rs_sgi_allow,
272 },
273 },
274 [RS_COLUMN_SISO_ANT_B_SGI] = {
275 .mode = RS_SISO,
276 .ant = ANT_B,
277 .sgi = true,
278 .next_columns = {
279 RS_COLUMN_SISO_ANT_A_SGI,
280 RS_COLUMN_MIMO2_SGI,
281 RS_COLUMN_SISO_ANT_B,
282 RS_COLUMN_SISO_ANT_A,
283 RS_COLUMN_MIMO2,
284 },
285 .checks = {
286 rs_siso_allow,
287 rs_sgi_allow,
288 },
289 },
290 [RS_COLUMN_MIMO2] = {
291 .mode = RS_MIMO2,
292 .ant = ANT_AB,
293 .next_columns = {
294 RS_COLUMN_SISO_ANT_A,
295 RS_COLUMN_SISO_ANT_B,
296 RS_COLUMN_SISO_ANT_A_SGI,
297 RS_COLUMN_SISO_ANT_B_SGI,
298 RS_COLUMN_MIMO2_SGI,
299 },
300 .checks = {
301 rs_mimo_allow,
302 },
303 },
304 [RS_COLUMN_MIMO2_SGI] = {
305 .mode = RS_MIMO2,
306 .ant = ANT_AB,
307 .sgi = true,
308 .next_columns = {
309 RS_COLUMN_SISO_ANT_A_SGI,
310 RS_COLUMN_SISO_ANT_B_SGI,
311 RS_COLUMN_SISO_ANT_A,
312 RS_COLUMN_SISO_ANT_B,
313 RS_COLUMN_MIMO2,
314 },
315 .checks = {
316 rs_mimo_allow,
317 rs_sgi_allow,
318 },
319 },
320 };
321
322 static inline u8 rs_extract_rate(u32 rate_n_flags)
323 {
324 /* also works for HT because bits 7:6 are zero there */
325 return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
326 }
327
328 static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
329 {
330 int idx = 0;
331
332 if (rate_n_flags & RATE_MCS_HT_MSK) {
333 idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
334 idx += IWL_RATE_MCS_0_INDEX;
335
336 /* skip 9M not supported in HT */
337 if (idx >= IWL_RATE_9M_INDEX)
338 idx += 1;
339 if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
340 return idx;
341 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
342 idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
343 idx += IWL_RATE_MCS_0_INDEX;
344
345 /* skip 9M not supported in VHT */
346 if (idx >= IWL_RATE_9M_INDEX)
347 idx++;
348 if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
349 return idx;
350 } else {
351 /* legacy rate format, search for match in table */
352
353 u8 legacy_rate = rs_extract_rate(rate_n_flags);
354 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
355 if (iwl_rates[idx].plcp == legacy_rate)
356 return idx;
357 }
358
359 return IWL_RATE_INVALID;
360 }
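/*
 * Worked example for the HT branch above: a rate_n_flags carrying HT MCS
 * code 3 starts at IWL_RATE_MCS_0_INDEX + 3; because the legacy 9 Mbps slot
 * sits between MCS 0 (6 Mbps) and MCS 1 (12 Mbps) in iwl_rates[], the index
 * is bumped by one and lands on IWL_RATE_MCS_3_INDEX (the 24 Mbps row of
 * the table above).
 */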
361
362 static void rs_rate_scale_perform(struct iwl_mvm *mvm,
363 struct sk_buff *skb,
364 struct ieee80211_sta *sta,
365 struct iwl_lq_sta *lq_sta);
366 static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
367 struct ieee80211_sta *sta,
368 struct iwl_lq_sta *lq_sta,
369 const struct rs_rate *initial_rate);
370 static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
371
372 /*
373 * The following tables contain the expected throughput metrics for all rates
374 *
375 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 Mbps
376 *
377 * where invalid entries are zeros.
378 *
379 * CCK rates are only valid in the legacy table and will only be used in the
380 * 2.4 GHz (G) band.
381 */
382
383 static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
384 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
385 };
386
387 /* Expected TpT tables. 4 indexes:
388 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
389 */
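/* The columns below follow the iwl_rates[] ordering (1, 2, 5.5, 11, 6, 9,
 * 12, ..., 54, then MCS 7/8/9), so for instance
 * expected_tpt_siso_20MHz[3][12] = 513 is the expected throughput metric
 * for SISO 20 MHz at MCS 7 with both aggregation and SGI (row 3).
 */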
390 static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
391 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
392 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
393 {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
394 {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
395 };
396
397 static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
398 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
399 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
400 {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
401 {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
402 };
403
404 static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
405 {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
406 {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
407 {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
408 {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
409 };
410
411 static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
412 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
413 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
414 {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
415 {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
416 };
417
418 static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
419 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
420 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
421 {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
422 {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
423 };
424
425 static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
426 {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
427 {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
428 {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
429 {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
430 };
431
432 /* mbps, mcs */
433 static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
434 { "1", "BPSK DSSS"},
435 { "2", "QPSK DSSS"},
436 {"5.5", "BPSK CCK"},
437 { "11", "QPSK CCK"},
438 { "6", "BPSK 1/2"},
439 { "9", "BPSK 1/2"},
440 { "12", "QPSK 1/2"},
441 { "18", "QPSK 3/4"},
442 { "24", "16QAM 1/2"},
443 { "36", "16QAM 3/4"},
444 { "48", "64QAM 2/3"},
445 { "54", "64QAM 3/4"},
446 { "60", "64QAM 5/6"},
447 };
448
449 #define MCS_INDEX_PER_STREAM (8)
450
451 static const char *rs_pretty_ant(u8 ant)
452 {
453 static const char * const ant_name[] = {
454 [ANT_NONE] = "None",
455 [ANT_A] = "A",
456 [ANT_B] = "B",
457 [ANT_AB] = "AB",
458 [ANT_C] = "C",
459 [ANT_AC] = "AC",
460 [ANT_BC] = "BC",
461 [ANT_ABC] = "ABC",
462 };
463
464 if (ant > ANT_ABC)
465 return "UNKNOWN";
466
467 return ant_name[ant];
468 }
469
470 static const char *rs_pretty_lq_type(enum iwl_table_type type)
471 {
472 static const char * const lq_types[] = {
473 [LQ_NONE] = "NONE",
474 [LQ_LEGACY_A] = "LEGACY_A",
475 [LQ_LEGACY_G] = "LEGACY_G",
476 [LQ_HT_SISO] = "HT SISO",
477 [LQ_HT_MIMO2] = "HT MIMO",
478 [LQ_VHT_SISO] = "VHT SISO",
479 [LQ_VHT_MIMO2] = "VHT MIMO",
480 };
481
482 if (type < LQ_NONE || type >= LQ_MAX)
483 return "UNKNOWN";
484
485 return lq_types[type];
486 }
487
488 static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
489 const char *prefix)
490 {
491 IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d\n",
492 prefix, rs_pretty_lq_type(rate->type),
493 rate->index, rs_pretty_ant(rate->ant),
494 rate->bw, rate->sgi);
495 }
496
497 static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
498 {
499 window->data = 0;
500 window->success_counter = 0;
501 window->success_ratio = IWL_INVALID_VALUE;
502 window->counter = 0;
503 window->average_tpt = IWL_INVALID_VALUE;
504 }
505
506 static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
507 {
508 int i;
509
510 for (i = 0; i < IWL_RATE_COUNT; i++)
511 rs_rate_scale_clear_window(&tbl->win[i]);
512
513 for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
514 rs_rate_scale_clear_window(&tbl->tpc_win[i]);
515 }
516
517 static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
518 {
519 return (ant_type & valid_antenna) == ant_type;
520 }
521
522 static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
523 struct iwl_lq_sta *lq_data, u8 tid,
524 struct ieee80211_sta *sta)
525 {
526 int ret = -EAGAIN;
527
528 IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
529 sta->addr, tid);
530 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
531 if (ret == -EAGAIN) {
532 /*
533 * The driver and mac80211 are out of sync; this might be
534 * caused by reloading the firmware. Stop the Tx BA session
535 * here.
536 */
537 IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
538 tid);
539 ieee80211_stop_tx_ba_session(sta, tid);
540 }
541 return ret;
542 }
543
544 static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid,
545 struct iwl_lq_sta *lq_data,
546 struct ieee80211_sta *sta)
547 {
548 if (tid < IWL_MAX_TID_COUNT)
549 rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta);
550 else
551 IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
552 tid, IWL_MAX_TID_COUNT);
553 }
554
555 static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
556 {
557 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
558 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
559 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
560 }
561
562 /*
563 * Static helper to get the expected throughput from an iwl_scale_tbl_info;
564 * it wraps the NULL pointer check on the table's expected_tpt array.
565 */
566 static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
567 {
568 if (tbl->expected_tpt)
569 return tbl->expected_tpt[rs_index];
570 return 0;
571 }
572
573 /**
574 * _rs_collect_tx_data - Update the success/failure sliding window
575 *
576 * We keep a sliding window of the last 62 packets transmitted
577 * at this rate. window->data contains the bitmask of successful
578 * packets.
579 */
580 static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
581 int scale_index, int attempts, int successes,
582 struct iwl_rate_scale_data *window)
583 {
584 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
585 s32 fail_count, tpt;
586
587 /* Get expected throughput */
588 tpt = get_expected_tpt(tbl, scale_index);
589
590 /*
591 * Keep track of only the latest 62 tx frame attempts in this rate's
592 * history window; anything older isn't really relevant any more.
593 * If we have filled up the sliding window, drop the oldest attempt;
594 * if the oldest attempt (highest bit in bitmap) shows "success",
595 * subtract "1" from the success counter (this is the main reason
596 * we keep these bitmaps!).
597 */
598 while (attempts > 0) {
599 if (window->counter >= IWL_RATE_MAX_WINDOW) {
600 /* remove earliest */
601 window->counter = IWL_RATE_MAX_WINDOW - 1;
602
603 if (window->data & mask) {
604 window->data &= ~mask;
605 window->success_counter--;
606 }
607 }
608
609 /* Increment frames-attempted counter */
610 window->counter++;
611
612 /* Shift bitmap by one frame to throw away oldest history */
613 window->data <<= 1;
614
615 /* Mark the most recent #successes attempts as successful */
616 if (successes > 0) {
617 window->success_counter++;
618 window->data |= 0x1;
619 successes--;
620 }
621
622 attempts--;
623 }
624
625 /* Calculate current success ratio, avoid divide-by-0! */
626 if (window->counter > 0)
627 window->success_ratio = 128 * (100 * window->success_counter)
628 / window->counter;
629 else
630 window->success_ratio = IWL_INVALID_VALUE;
631
632 fail_count = window->counter - window->success_counter;
633
634 /* Calculate average throughput, if we have enough history. */
635 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
636 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
637 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
638 else
639 window->average_tpt = IWL_INVALID_VALUE;
640
641 return 0;
642 }
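/*
 * Worked example of the arithmetic above: with 30 successes out of 40
 * frames in the window, success_ratio = 128 * (100 * 30) / 40 = 9600,
 * i.e. the success percentage (75%) scaled by 128. average_tpt then
 * becomes (9600 * tpt + 64) / 128, which is the expected throughput of
 * this rate weighted by that percentage.
 */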
643
644 static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
645 struct iwl_scale_tbl_info *tbl,
646 int scale_index, int attempts, int successes,
647 u8 reduced_txp)
648 {
649 struct iwl_rate_scale_data *window = NULL;
650 int ret;
651
652 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
653 return -EINVAL;
654
655 if (tbl->column != RS_COLUMN_INVALID) {
656 lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
657 lq_sta->tx_stats[tbl->column][scale_index].success += successes;
658 }
659
660 /* Select window for current tx bit rate */
661 window = &(tbl->win[scale_index]);
662
663 ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
664 window);
665 if (ret)
666 return ret;
667
668 if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
669 return -EINVAL;
670
671 window = &tbl->tpc_win[reduced_txp];
672 return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
673 window);
674 }
675
676 /* Convert rs_rate object into ucode rate bitmask */
677 static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
678 struct rs_rate *rate)
679 {
680 u32 ucode_rate = 0;
681 int index = rate->index;
682
683 ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
684 RATE_MCS_ANT_ABC_MSK);
685
686 if (is_legacy(rate)) {
687 ucode_rate |= iwl_rates[index].plcp;
688 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
689 ucode_rate |= RATE_MCS_CCK_MSK;
690 return ucode_rate;
691 }
692
693 if (is_ht(rate)) {
694 if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
695 IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
696 index = IWL_LAST_HT_RATE;
697 }
698 ucode_rate |= RATE_MCS_HT_MSK;
699
700 if (is_ht_siso(rate))
701 ucode_rate |= iwl_rates[index].plcp_ht_siso;
702 else if (is_ht_mimo2(rate))
703 ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
704 else
705 WARN_ON_ONCE(1);
706 } else if (is_vht(rate)) {
707 if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
708 IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
709 index = IWL_LAST_VHT_RATE;
710 }
711 ucode_rate |= RATE_MCS_VHT_MSK;
712 if (is_vht_siso(rate))
713 ucode_rate |= iwl_rates[index].plcp_vht_siso;
714 else if (is_vht_mimo2(rate))
715 ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
716 else
717 WARN_ON_ONCE(1);
718
719 } else {
720 IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
721 }
722
723 ucode_rate |= rate->bw;
724 if (rate->sgi)
725 ucode_rate |= RATE_MCS_SGI_MSK;
726
727 return ucode_rate;
728 }
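/*
 * For example, a 40 MHz HT SISO MCS 5 rate with SGI on antenna A would be
 * assembled above as
 * (ANT_A << RATE_MCS_ANT_POS) | RATE_MCS_HT_MSK |
 * iwl_rates[IWL_RATE_48M_INDEX].plcp_ht_siso |
 * RATE_MCS_CHAN_WIDTH_40 | RATE_MCS_SGI_MSK,
 * 48 Mbps being the legacy row that carries MCS 5 in iwl_rates[].
 */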
729
730 /* Convert a ucode rate into an rs_rate object */
731 static int rs_rate_from_ucode_rate(const u32 ucode_rate,
732 enum ieee80211_band band,
733 struct rs_rate *rate)
734 {
735 u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
736 u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
737 u8 nss;
738
739 memset(rate, 0, sizeof(*rate));
740 rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
741
742 if (rate->index == IWL_RATE_INVALID)
743 return -EINVAL;
744
745 rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
746
747 /* Legacy */
748 if (!(ucode_rate & RATE_MCS_HT_MSK) &&
749 !(ucode_rate & RATE_MCS_VHT_MSK)) {
750 if (num_of_ant == 1) {
751 if (band == IEEE80211_BAND_5GHZ)
752 rate->type = LQ_LEGACY_A;
753 else
754 rate->type = LQ_LEGACY_G;
755 }
756
757 return 0;
758 }
759
760 /* HT or VHT */
761 if (ucode_rate & RATE_MCS_SGI_MSK)
762 rate->sgi = true;
763
764 rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
765
766 if (ucode_rate & RATE_MCS_HT_MSK) {
767 nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
768 RATE_HT_MCS_NSS_POS) + 1;
769
770 if (nss == 1) {
771 rate->type = LQ_HT_SISO;
772 WARN_ON_ONCE(num_of_ant != 1);
773 } else if (nss == 2) {
774 rate->type = LQ_HT_MIMO2;
775 WARN_ON_ONCE(num_of_ant != 2);
776 } else {
777 WARN_ON_ONCE(1);
778 }
779 } else if (ucode_rate & RATE_MCS_VHT_MSK) {
780 nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
781 RATE_VHT_MCS_NSS_POS) + 1;
782
783 if (nss == 1) {
784 rate->type = LQ_VHT_SISO;
785 WARN_ON_ONCE(num_of_ant != 1);
786 } else if (nss == 2) {
787 rate->type = LQ_VHT_MIMO2;
788 WARN_ON_ONCE(num_of_ant != 2);
789 } else {
790 WARN_ON_ONCE(1);
791 }
792 }
793
794 WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
795 WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
796 !is_vht(rate));
797
798 return 0;
799 }
800
801 /* switch to another antenna/antennas and return 1 */
802 /* if no other valid antenna found, return 0 */
803 static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
804 {
805 u8 new_ant_type;
806
807 if (!rate->ant || rate->ant > ANT_ABC)
808 return 0;
809
810 if (!rs_is_valid_ant(valid_ant, rate->ant))
811 return 0;
812
813 new_ant_type = ant_toggle_lookup[rate->ant];
814
815 while ((new_ant_type != rate->ant) &&
816 !rs_is_valid_ant(valid_ant, new_ant_type))
817 new_ant_type = ant_toggle_lookup[new_ant_type];
818
819 if (new_ant_type == rate->ant)
820 return 0;
821
822 rate->ant = new_ant_type;
823
824 return 1;
825 }
826
827 static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
828 struct rs_rate *rate)
829 {
830 if (is_legacy(rate))
831 return lq_sta->active_legacy_rate;
832 else if (is_siso(rate))
833 return lq_sta->active_siso_rate;
834 else if (is_mimo2(rate))
835 return lq_sta->active_mimo2_rate;
836
837 WARN_ON_ONCE(1);
838 return 0;
839 }
840
841 static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
842 int rate_type)
843 {
844 u8 high = IWL_RATE_INVALID;
845 u8 low = IWL_RATE_INVALID;
846
847 /* 802.11a or HT/VHT rates walk to the next literal adjacent rate in
848 * the rate table */
849 if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
850 int i;
851 u32 mask;
852
853 /* Find the previous rate that is in the rate mask */
854 i = index - 1;
855 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
856 if (rate_mask & mask) {
857 low = i;
858 break;
859 }
860 }
861
862 /* Find the next rate that is in the rate mask */
863 i = index + 1;
864 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
865 if (rate_mask & mask) {
866 high = i;
867 break;
868 }
869 }
870
871 return (high << 8) | low;
872 }
873
874 low = index;
875 while (low != IWL_RATE_INVALID) {
876 low = iwl_rates[low].prev_rs;
877 if (low == IWL_RATE_INVALID)
878 break;
879 if (rate_mask & (1 << low))
880 break;
881 IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low);
882 }
883
884 high = index;
885 while (high != IWL_RATE_INVALID) {
886 high = iwl_rates[high].next_rs;
887 if (high == IWL_RATE_INVALID)
888 break;
889 if (rate_mask & (1 << high))
890 break;
891 IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high);
892 }
893
894 return (high << 8) | low;
895 }
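/*
 * The return value packs both neighbours into one u16: the next higher
 * supported index in the upper byte and the next lower one in the lower
 * byte, so callers unpack it as "low = high_low & 0xff" and
 * "high = (high_low >> 8) & 0xff" (see rs_get_best_rate() below), with
 * IWL_RATE_INVALID marking a missing neighbour.
 */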
896
897 static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
898 struct rs_rate *rate)
899 {
900 return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
901 }
902
903 /* Get the next supported lower rate in the current column.
904 * Return true if bottom rate in the current column was reached
905 */
906 static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
907 struct rs_rate *rate)
908 {
909 u8 low;
910 u16 high_low;
911 u16 rate_mask;
912 struct iwl_mvm *mvm = lq_sta->drv;
913
914 rate_mask = rs_get_supported_rates(lq_sta, rate);
915 high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
916 rate->type);
917 low = high_low & 0xff;
918
919 /* Bottom rate of column reached */
920 if (low == IWL_RATE_INVALID)
921 return true;
922
923 rate->index = low;
924 return false;
925 }
926
927 /* Get the next rate to use following a column downgrade */
928 static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
929 struct rs_rate *rate)
930 {
931 struct iwl_mvm *mvm = lq_sta->drv;
932
933 if (is_legacy(rate)) {
934 /* No column to downgrade from Legacy */
935 return;
936 } else if (is_siso(rate)) {
937 /* Downgrade to Legacy if we were in SISO */
938 if (lq_sta->band == IEEE80211_BAND_5GHZ)
939 rate->type = LQ_LEGACY_A;
940 else
941 rate->type = LQ_LEGACY_G;
942
943 rate->bw = RATE_MCS_CHAN_WIDTH_20;
944
945 WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
946 rate->index > IWL_RATE_MCS_9_INDEX);
947
948 rate->index = rs_ht_to_legacy[rate->index];
949 } else {
950 /* Downgrade to SISO with same MCS if in MIMO */
951 rate->type = is_vht_mimo2(rate) ?
952 LQ_VHT_SISO : LQ_HT_SISO;
953 }
954
955
956 if (num_of_ant(rate->ant) > 1)
957 rate->ant = first_antenna(mvm->fw->valid_tx_ant);
958
959 /* Relevant in both switching to SISO or Legacy */
960 rate->sgi = false;
961
962 if (!rs_rate_supported(lq_sta, rate))
963 rs_get_lower_rate_in_column(lq_sta, rate);
964 }
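/*
 * Example of the downgrade ladder above: a MIMO2 rate first drops to SISO
 * at the same MCS; a SISO MCS 4 rate then drops to 24 Mbps legacy (via
 * rs_ht_to_legacy[]) at 20 MHz, on a single antenna and with SGI turned
 * off, and rs_get_lower_rate_in_column() is used as a last resort if the
 * resulting rate isn't in the supported mask.
 */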
965
966 /* Simple function to compare two rate scale table types */
967 static inline bool rs_rate_match(struct rs_rate *a,
968 struct rs_rate *b)
969 {
970 return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi);
971 }
972
973 static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
974 {
975 if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
976 return RATE_MCS_CHAN_WIDTH_40;
977 else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
978 return RATE_MCS_CHAN_WIDTH_80;
979 else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
980 return RATE_MCS_CHAN_WIDTH_160;
981
982 return RATE_MCS_CHAN_WIDTH_20;
983 }
984
985 /*
986 * mac80211 sends us Tx status
987 */
988 static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
989 struct ieee80211_sta *sta, void *priv_sta,
990 struct sk_buff *skb)
991 {
992 int legacy_success;
993 int retries;
994 int mac_index, i;
995 struct iwl_lq_sta *lq_sta = priv_sta;
996 struct iwl_lq_cmd *table;
997 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
998 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
999 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1000 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1001 enum mac80211_rate_control_flags mac_flags;
1002 u32 ucode_rate;
1003 struct rs_rate rate;
1004 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
1005 u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
1006
1007 /* Treat uninitialized rate scaling data same as non-existing. */
1008 if (!lq_sta) {
1009 IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
1010 return;
1011 } else if (!lq_sta->drv) {
1012 IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
1013 return;
1014 }
1015
1016 if (!ieee80211_is_data(hdr->frame_control) ||
1017 info->flags & IEEE80211_TX_CTL_NO_ACK)
1018 return;
1019
1020 /* This packet was aggregated but doesn't carry status info */
1021 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
1022 !(info->flags & IEEE80211_TX_STAT_AMPDU))
1023 return;
1024
1025 /*
1026 * Ignore this Tx frame response if its initial rate doesn't match
1027 * that of latest Link Quality command. There may be stragglers
1028 * from a previous Link Quality command, but we're no longer interested
1029 * in those; they're either from the "active" mode while we're trying
1030 * to check "search" mode, or a prior "search" mode after we've moved
1031 * to a new "search" mode (which might become the new "active" mode).
1032 */
1033 table = &lq_sta->lq;
1034 ucode_rate = le32_to_cpu(table->rs_table[0]);
1035 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
1036 if (info->band == IEEE80211_BAND_5GHZ)
1037 rate.index -= IWL_FIRST_OFDM_RATE;
1038 mac_flags = info->status.rates[0].flags;
1039 mac_index = info->status.rates[0].idx;
1040 /* For HT packets, map MCS to PLCP */
1041 if (mac_flags & IEEE80211_TX_RC_MCS) {
1042 /* Remove # of streams */
1043 mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
1044 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
1045 mac_index++;
1046 /*
1047 * mac80211 HT index is always zero-indexed; we need to move
1048 * HT OFDM rates after CCK rates in 2.4 GHz band
1049 */
1050 if (info->band == IEEE80211_BAND_2GHZ)
1051 mac_index += IWL_FIRST_OFDM_RATE;
1052 } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
1053 mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
1054 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
1055 mac_index++;
1056 }
1057
1058 /* Here we actually compare this rate to the latest LQ command */
1059 if ((mac_index < 0) ||
1060 (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
1061 (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
1062 (rate.ant != info->status.antenna) ||
1063 (!!(ucode_rate & RATE_MCS_HT_MSK) !=
1064 !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
1065 (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
1066 !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
1067 (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
1068 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
1069 (rate.index != mac_index)) {
1070 IWL_DEBUG_RATE(mvm,
1071 "initial rate %d does not match %d (0x%x)\n",
1072 mac_index, rate.index, ucode_rate);
1073 /*
1074 * Since rates mis-match, the last LQ command may have failed.
1075 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
1076 * the driver.
1077 */
1078 lq_sta->missed_rate_counter++;
1079 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
1080 lq_sta->missed_rate_counter = 0;
1081 IWL_DEBUG_RATE(mvm,
1082 "Too many rates mismatch. Send sync LQ. rs_state %d\n",
1083 lq_sta->rs_state);
1084 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
1085 }
1086 /* Regardless, ignore this status info for outdated rate */
1087 return;
1088 } else
1089 /* Rate did match, so reset the missed_rate_counter */
1090 lq_sta->missed_rate_counter = 0;
1091
1092 /* Figure out if rate scale algorithm is in active or search table */
1093 if (rs_rate_match(&rate,
1094 &(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
1095 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1096 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1097 } else if (rs_rate_match(&rate,
1098 &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
1099 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1100 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1101 } else {
1102 IWL_DEBUG_RATE(mvm,
1103 "Neither active nor search matches tx rate\n");
1104 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1105 rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
1106 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
1107 rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
1108 rs_dump_rate(mvm, &rate, "ACTUAL");
1109
1110 /*
1111 * no matching table found, let's by-pass the data collection
1112 * and continue to perform rate scale to find the rate table
1113 */
1114 rs_stay_in_table(lq_sta, true);
1115 goto done;
1116 }
1117
1118 /*
1119 * Updating the frame history depends on whether packets were
1120 * aggregated.
1121 *
1122 * For aggregation, all packets were transmitted at the same rate, the
1123 * first index into rate scale table.
1124 */
1125 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1126 ucode_rate = le32_to_cpu(table->rs_table[0]);
1127 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
1128 rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
1129 info->status.ampdu_len,
1130 info->status.ampdu_ack_len,
1131 reduced_txp);
1132
1133 /* Update success/fail counts if not searching for new mode */
1134 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1135 lq_sta->total_success += info->status.ampdu_ack_len;
1136 lq_sta->total_failed += (info->status.ampdu_len -
1137 info->status.ampdu_ack_len);
1138 }
1139 } else {
1140 /*
1141 * For legacy, update the frame history for each Tx retry.
1142 */
1143 retries = info->status.rates[0].count - 1;
1144 /* HW doesn't send more than 15 retries */
1145 retries = min(retries, 15);
1146
1147 /* The last transmission may have been successful */
1148 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1149 /* Collect data for each rate used during failed TX attempts */
1150 for (i = 0; i <= retries; ++i) {
1151 ucode_rate = le32_to_cpu(table->rs_table[i]);
1152 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
1153 /*
1154 * Only collect stats if retried rate is in the same RS
1155 * table as active/search.
1156 */
1157 if (rs_rate_match(&rate, &curr_tbl->rate))
1158 tmp_tbl = curr_tbl;
1159 else if (rs_rate_match(&rate, &other_tbl->rate))
1160 tmp_tbl = other_tbl;
1161 else
1162 continue;
1163
1164 rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
1165 i < retries ? 0 : legacy_success,
1166 reduced_txp);
1167 }
1168
1169 /* Update success/fail counts if not searching for new mode */
1170 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1171 lq_sta->total_success += legacy_success;
1172 lq_sta->total_failed += retries + (1 - legacy_success);
1173 }
1174 }
1175 /* The last TX rate is cached in lq_sta; it's set in if/else above */
1176 lq_sta->last_rate_n_flags = ucode_rate;
1177 IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
1178 done:
1179 /* See if there's a better rate or modulation mode to try. */
1180 if (sta && sta->supp_rates[sband->band])
1181 rs_rate_scale_perform(mvm, skb, sta, lq_sta);
1182 }
1183
1184 /*
1185 * Begin a period of staying with a selected modulation mode.
1186 * Set "stay_in_tbl" flag to prevent any mode switches.
1187 * Set frame tx success limits according to legacy vs. high-throughput,
1188 * and reset overall (spanning all rates) tx success history statistics.
1189 * These control how long we stay using same modulation mode before
1190 * searching for a new mode.
1191 */
1192 static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1193 struct iwl_lq_sta *lq_sta)
1194 {
1195 IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
1196 lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
1197 if (is_legacy) {
1198 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1199 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1200 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1201 } else {
1202 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1203 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1204 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1205 }
1206 lq_sta->table_count = 0;
1207 lq_sta->total_failed = 0;
1208 lq_sta->total_success = 0;
1209 lq_sta->flush_timer = jiffies;
1210 lq_sta->visited_columns = 0;
1211 }
1212
1213 static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1214 const struct rs_tx_column *column,
1215 u32 bw)
1216 {
1217 /* Used to choose among HT tables */
1218 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1219
1220 if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
1221 column->mode != RS_SISO &&
1222 column->mode != RS_MIMO2))
1223 return expected_tpt_legacy;
1224
1225 /* Legacy rates have only one table */
1226 if (column->mode == RS_LEGACY)
1227 return expected_tpt_legacy;
1228
1229 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1230 /* Choose among many HT tables depending on number of streams
1231 * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
1232 * status */
1233 if (column->mode == RS_SISO) {
1234 switch (bw) {
1235 case RATE_MCS_CHAN_WIDTH_20:
1236 ht_tbl_pointer = expected_tpt_siso_20MHz;
1237 break;
1238 case RATE_MCS_CHAN_WIDTH_40:
1239 ht_tbl_pointer = expected_tpt_siso_40MHz;
1240 break;
1241 case RATE_MCS_CHAN_WIDTH_80:
1242 ht_tbl_pointer = expected_tpt_siso_80MHz;
1243 break;
1244 default:
1245 WARN_ON_ONCE(1);
1246 }
1247 } else if (column->mode == RS_MIMO2) {
1248 switch (bw) {
1249 case RATE_MCS_CHAN_WIDTH_20:
1250 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1251 break;
1252 case RATE_MCS_CHAN_WIDTH_40:
1253 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1254 break;
1255 case RATE_MCS_CHAN_WIDTH_80:
1256 ht_tbl_pointer = expected_tpt_mimo2_80MHz;
1257 break;
1258 default:
1259 WARN_ON_ONCE(1);
1260 }
1261 } else {
1262 WARN_ON_ONCE(1);
1263 }
1264
1265 if (!column->sgi && !lq_sta->is_agg) /* Normal */
1266 return ht_tbl_pointer[0];
1267 else if (column->sgi && !lq_sta->is_agg) /* SGI */
1268 return ht_tbl_pointer[1];
1269 else if (!column->sgi && lq_sta->is_agg) /* AGG */
1270 return ht_tbl_pointer[2];
1271 else /* AGG+SGI */
1272 return ht_tbl_pointer[3];
1273 }
1274
1275 static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1276 struct iwl_scale_tbl_info *tbl)
1277 {
1278 struct rs_rate *rate = &tbl->rate;
1279 const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
1280
1281 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
1282 }
1283
1284 /*
1285 * Find starting rate for new "search" high-throughput mode of modulation.
1286 * Goal is to find lowest expected rate (under perfect conditions) that is
1287 * above the current measured throughput of "active" mode, to give new mode
1288 * a fair chance to prove itself without too many challenges.
1289 *
1290 * This gets called when transitioning to more aggressive modulation
1291 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1292 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1293 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1294 * bit rate will typically need to increase, but not if performance was bad.
1295 */
1296 static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1297 struct iwl_lq_sta *lq_sta,
1298 struct iwl_scale_tbl_info *tbl, /* "search" */
1299 u16 rate_mask, s8 index)
1300 {
1301 /* "active" values */
1302 struct iwl_scale_tbl_info *active_tbl =
1303 &(lq_sta->lq_info[lq_sta->active_tbl]);
1304 s32 active_sr = active_tbl->win[index].success_ratio;
1305 s32 active_tpt = active_tbl->expected_tpt[index];
1306 /* expected "search" throughput */
1307 const u16 *tpt_tbl = tbl->expected_tpt;
1308
1309 s32 new_rate, high, low, start_hi;
1310 u16 high_low;
1311 s8 rate = index;
1312
1313 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1314
1315 while (1) {
1316 high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
1317 tbl->rate.type);
1318
1319 low = high_low & 0xff;
1320 high = (high_low >> 8) & 0xff;
1321
1322 /*
1323 * Lower the "search" bit rate, to give new "search" mode
1324 * approximately the same throughput as "active" if:
1325 *
1326 * 1) "Active" mode has been working modestly well (but not
1327 * great), and expected "search" throughput (under perfect
1328 * conditions) at candidate rate is above the actual
1329 * measured "active" throughput (but less than expected
1330 * "active" throughput under perfect conditions).
1331 * OR
1332 * 2) "Active" mode has been working perfectly or very well
1333 * and expected "search" throughput (under perfect
1334 * conditions) at candidate rate is above expected
1335 * "active" throughput (under perfect conditions).
1336 */
1337 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1338 ((active_sr > RS_SR_FORCE_DECREASE) &&
1339 (active_sr <= IWL_RATE_HIGH_TH) &&
1340 (tpt_tbl[rate] <= active_tpt))) ||
1341 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1342 (tpt_tbl[rate] > active_tpt))) {
1343 /* (2nd or later pass)
1344 * If we've already tried to raise the rate, and are
1345 * now trying to lower it, use the higher rate. */
1346 if (start_hi != IWL_RATE_INVALID) {
1347 new_rate = start_hi;
1348 break;
1349 }
1350
1351 new_rate = rate;
1352
1353 /* Loop again with lower rate */
1354 if (low != IWL_RATE_INVALID)
1355 rate = low;
1356
1357 /* Lower rate not available, use the original */
1358 else
1359 break;
1360
1361 /* Else try to raise the "search" rate to match "active" */
1362 } else {
1363 /* (2nd or later pass)
1364 * If we've already tried to lower the rate, and are
1365 * now trying to raise it, use the lower rate. */
1366 if (new_rate != IWL_RATE_INVALID)
1367 break;
1368
1369 /* Loop again with higher rate */
1370 else if (high != IWL_RATE_INVALID) {
1371 start_hi = high;
1372 rate = high;
1373
1374 /* Higher rate not available, use the original */
1375 } else {
1376 new_rate = rate;
1377 break;
1378 }
1379 }
1380 }
1381
1382 return new_rate;
1383 }
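/*
 * Put differently: when the active column has been doing well, the loop
 * above keeps stepping the candidate down (rate = low) while the search
 * column's expected throughput still beats the active one, and returns the
 * lowest candidate that did; if it first had to probe upwards and then
 * reverses direction, the previously tried higher rate (start_hi) is used.
 */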
1384
1385 static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
1386 {
1387 if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
1388 return RATE_MCS_CHAN_WIDTH_80;
1389 else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
1390 return RATE_MCS_CHAN_WIDTH_40;
1391
1392 return RATE_MCS_CHAN_WIDTH_20;
1393 }
1394
1395 /*
1396 * Check whether we should continue using same modulation mode, or
1397 * begin search for a new mode, based on:
1398 * 1) # tx successes or failures while using this mode
1399 * 2) # times calling this function
1400 * 3) elapsed time in this mode (not used, for now)
1401 */
1402 static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1403 {
1404 struct iwl_scale_tbl_info *tbl;
1405 int active_tbl;
1406 int flush_interval_passed = 0;
1407 struct iwl_mvm *mvm;
1408
1409 mvm = lq_sta->drv;
1410 active_tbl = lq_sta->active_tbl;
1411
1412 tbl = &(lq_sta->lq_info[active_tbl]);
1413
1414 /* If we've been disallowing search, see if we should now allow it */
1415 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1416 /* Elapsed time using current modulation mode */
1417 if (lq_sta->flush_timer)
1418 flush_interval_passed =
1419 time_after(jiffies,
1420 (unsigned long)(lq_sta->flush_timer +
1421 RS_STAY_IN_COLUMN_TIMEOUT));
1422
1423 /*
1424 * Check if we should allow search for new modulation mode.
1425 * If many frames have failed or succeeded, or we've used
1426 * this same modulation for a long time, allow search, and
1427 * reset history stats that keep track of whether we should
1428 * allow a new search. Also (below) reset all bitmaps and
1429 * stats in active history.
1430 */
1431 if (force_search ||
1432 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1433 (lq_sta->total_success > lq_sta->max_success_limit) ||
1434 ((!lq_sta->search_better_tbl) &&
1435 (lq_sta->flush_timer) && (flush_interval_passed))) {
1436 IWL_DEBUG_RATE(mvm,
1437 "LQ: stay is expired %d %d %d\n",
1438 lq_sta->total_failed,
1439 lq_sta->total_success,
1440 flush_interval_passed);
1441
1442 /* Allow search for new mode */
1443 lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
1444 IWL_DEBUG_RATE(mvm,
1445 "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
1446 lq_sta->total_failed = 0;
1447 lq_sta->total_success = 0;
1448 lq_sta->flush_timer = 0;
1449 /* mark the current column as visited */
1450 lq_sta->visited_columns = BIT(tbl->column);
1451 /*
1452 * Else if we've used this modulation mode enough repetitions
1453 * (regardless of elapsed time or success/failure), reset
1454 * history bitmaps and rate-specific stats for all rates in
1455 * active table.
1456 */
1457 } else {
1458 lq_sta->table_count++;
1459 if (lq_sta->table_count >=
1460 lq_sta->table_count_limit) {
1461 lq_sta->table_count = 0;
1462
1463 IWL_DEBUG_RATE(mvm,
1464 "LQ: stay in table clear win\n");
1465 rs_rate_scale_clear_tbl_windows(tbl);
1466 }
1467 }
1468
1469 /* If transitioning to allow "search", reset all history
1470 * bitmaps and stats in active table (this will become the new
1471 * "search" table). */
1472 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
1473 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
1474 rs_rate_scale_clear_tbl_windows(tbl);
1475 }
1476 }
1477 }
1478
1479 /*
1480 * setup rate table in uCode
1481 */
1482 static void rs_update_rate_tbl(struct iwl_mvm *mvm,
1483 struct ieee80211_sta *sta,
1484 struct iwl_lq_sta *lq_sta,
1485 struct rs_rate *rate)
1486 {
1487 rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
1488 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
1489 }
1490
1491 static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
1492 struct ieee80211_hdr *hdr)
1493 {
1494 u8 tid = IWL_MAX_TID_COUNT;
1495
1496 if (ieee80211_is_data_qos(hdr->frame_control)) {
1497 u8 *qc = ieee80211_get_qos_ctl(hdr);
1498 tid = qc[0] & 0xf;
1499 }
1500
1501 if (unlikely(tid > IWL_MAX_TID_COUNT))
1502 tid = IWL_MAX_TID_COUNT;
1503
1504 return tid;
1505 }
1506
1507 static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1508 struct iwl_lq_sta *lq_sta,
1509 struct ieee80211_sta *sta,
1510 struct iwl_scale_tbl_info *tbl)
1511 {
1512 int i, j, n;
1513 enum rs_column next_col_id;
1514 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
1515 const struct rs_tx_column *next_col;
1516 allow_column_func_t allow_func;
1517 u8 valid_ants = mvm->fw->valid_tx_ant;
1518 const u16 *expected_tpt_tbl;
1519 s32 tpt, max_expected_tpt;
1520
1521 for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
1522 next_col_id = curr_col->next_columns[i];
1523
1524 if (next_col_id == RS_COLUMN_INVALID)
1525 continue;
1526
1527 if (lq_sta->visited_columns & BIT(next_col_id)) {
1528 IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
1529 next_col_id);
1530 continue;
1531 }
1532
1533 next_col = &rs_tx_columns[next_col_id];
1534
1535 if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
1536 IWL_DEBUG_RATE(mvm,
1537 "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
1538 next_col_id, valid_ants, next_col->ant);
1539 continue;
1540 }
1541
1542 for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
1543 allow_func = next_col->checks[j];
1544 if (allow_func && !allow_func(mvm, sta, tbl))
1545 break;
1546 }
1547
1548 if (j != MAX_COLUMN_CHECKS) {
1549 IWL_DEBUG_RATE(mvm,
1550 "Skip column %d: not allowed (check %d failed)\n",
1551 next_col_id, j);
1552
1553 continue;
1554 }
1555
1556 tpt = lq_sta->last_tpt / 100;
1557 expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
1558 tbl->rate.bw);
1559 if (WARN_ON_ONCE(!expected_tpt_tbl))
1560 continue;
1561
1562 max_expected_tpt = 0;
1563 for (n = 0; n < IWL_RATE_COUNT; n++)
1564 if (expected_tpt_tbl[n] > max_expected_tpt)
1565 max_expected_tpt = expected_tpt_tbl[n];
1566
1567 if (tpt >= max_expected_tpt) {
1568 IWL_DEBUG_RATE(mvm,
1569 "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
1570 next_col_id, max_expected_tpt, tpt);
1571 continue;
1572 }
1573
1574 break;
1575 }
1576
1577 if (i == MAX_NEXT_COLUMNS)
1578 return RS_COLUMN_INVALID;
1579
1580 IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
1581
1582 return next_col_id;
1583 }
1584
1585 static int rs_switch_to_column(struct iwl_mvm *mvm,
1586 struct iwl_lq_sta *lq_sta,
1587 struct ieee80211_sta *sta,
1588 enum rs_column col_id)
1589 {
1590 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1591 struct iwl_scale_tbl_info *search_tbl =
1592 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1593 struct rs_rate *rate = &search_tbl->rate;
1594 const struct rs_tx_column *column = &rs_tx_columns[col_id];
1595 const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
1596 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1597 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1598 u16 rate_mask = 0;
1599 u32 rate_idx = 0;
1600
1601 memcpy(search_tbl, tbl, sz);
1602
1603 rate->sgi = column->sgi;
1604 rate->ant = column->ant;
1605
1606 if (column->mode == RS_LEGACY) {
1607 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1608 rate->type = LQ_LEGACY_A;
1609 else
1610 rate->type = LQ_LEGACY_G;
1611
1612 rate_mask = lq_sta->active_legacy_rate;
1613 } else if (column->mode == RS_SISO) {
1614 rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
1615 rate_mask = lq_sta->active_siso_rate;
1616 } else if (column->mode == RS_MIMO2) {
1617 rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
1618 rate_mask = lq_sta->active_mimo2_rate;
1619 } else {
1620 WARN_ON_ONCE("Bad column mode");
1621 }
1622
1623 rate->bw = rs_bw_from_sta_bw(sta);
1624 search_tbl->column = col_id;
1625 rs_set_expected_tpt_table(lq_sta, search_tbl);
1626
1627 lq_sta->visited_columns |= BIT(col_id);
1628
1629 /* Get the best matching rate if we're changing modes. e.g.
1630 * SISO->MIMO, LEGACY->SISO, MIMO->SISO
1631 */
1632 if (curr_column->mode != column->mode) {
1633 rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
1634 rate_mask, rate->index);
1635
1636 if ((rate_idx == IWL_RATE_INVALID) ||
1637 !(BIT(rate_idx) & rate_mask)) {
1638 IWL_DEBUG_RATE(mvm,
1639 "can not switch with index %d"
1640 " rate mask %x\n",
1641 rate_idx, rate_mask);
1642
1643 goto err;
1644 }
1645
1646 rate->index = rate_idx;
1647 }
1648
1649 IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
1650 col_id, rate->index);
1651
1652 return 0;
1653
1654 err:
1655 rate->type = LQ_NONE;
1656 return -1;
1657 }
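/*
 * Note on the table juggling above: lq_sta->lq_info[] holds two scale
 * tables, the "active" one at active_tbl and the "search" candidate at
 * 1 - active_tbl. rs_switch_to_column() prepares the candidate in the
 * latter slot by copying the active table (the copy size deliberately
 * excludes the space taken by the per-rate history windows) and then
 * overriding mode, antenna, SGI, bandwidth and the starting rate.
 */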
1658
1659 static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
1660 struct iwl_scale_tbl_info *tbl,
1661 s32 sr, int low, int high,
1662 int current_tpt,
1663 int low_tpt, int high_tpt)
1664 {
1665 enum rs_action action = RS_ACTION_STAY;
1666
1667 /* Too many failures, decrease rate */
1668 if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
1669 IWL_DEBUG_RATE(mvm,
1670 "decrease rate because of low SR\n");
1671 action = RS_ACTION_DOWNSCALE;
1672 /* No throughput measured yet for adjacent rates; try increase. */
1673 } else if ((low_tpt == IWL_INVALID_VALUE) &&
1674 (high_tpt == IWL_INVALID_VALUE)) {
1675 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
1676 IWL_DEBUG_RATE(mvm,
1677 "Good SR and no high rate measurement. "
1678 "Increase rate\n");
1679 action = RS_ACTION_UPSCALE;
1680 } else if (low != IWL_RATE_INVALID) {
1681 IWL_DEBUG_RATE(mvm,
1682 "Remain in current rate\n");
1683 action = RS_ACTION_STAY;
1684 }
1685 }
1686
1687 /* Both adjacent throughputs are measured, but neither one has better
1688 * throughput; we're using the best rate, don't change it!
1689 */
1690 else if ((low_tpt != IWL_INVALID_VALUE) &&
1691 (high_tpt != IWL_INVALID_VALUE) &&
1692 (low_tpt < current_tpt) &&
1693 (high_tpt < current_tpt)) {
1694 IWL_DEBUG_RATE(mvm,
1695 "Both high and low are worse. "
1696 "Maintain rate\n");
1697 action = RS_ACTION_STAY;
1698 }
1699
1700 /* At least one adjacent rate's throughput is measured,
1701 * and may have better performance.
1702 */
1703 else {
1704 /* Higher adjacent rate's throughput is measured */
1705 if (high_tpt != IWL_INVALID_VALUE) {
1706 /* Higher rate has better throughput */
1707 if (high_tpt > current_tpt &&
1708 sr >= IWL_RATE_INCREASE_TH) {
1709 IWL_DEBUG_RATE(mvm,
1710 "Higher rate is better and good "
1711 "SR. Increase rate\n");
1712 action = RS_ACTION_UPSCALE;
1713 } else {
1714 IWL_DEBUG_RATE(mvm,
1715 "Higher rate isn't better OR "
1716 "no good SR. Maintain rate\n");
1717 action = RS_ACTION_STAY;
1718 }
1719
1720 /* Lower adjacent rate's throughput is measured */
1721 } else if (low_tpt != IWL_INVALID_VALUE) {
1722 /* Lower rate has better throughput */
1723 if (low_tpt > current_tpt) {
1724 IWL_DEBUG_RATE(mvm,
1725 "Lower rate is better. "
1726 "Decrease rate\n");
1727 action = RS_ACTION_DOWNSCALE;
1728 } else if (sr >= IWL_RATE_INCREASE_TH) {
1729 IWL_DEBUG_RATE(mvm,
1730 "Lower rate isn't better and "
1731 "good SR. Increase rate\n");
1732 action = RS_ACTION_UPSCALE;
1733 }
1734 }
1735 }
1736
1737 /* Sanity check; asked for decrease, but success rate or throughput
1738 * has been good at old rate. Don't change it.
1739 */
1740 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
1741 ((sr > IWL_RATE_HIGH_TH) ||
1742 (current_tpt > (100 * tbl->expected_tpt[low])))) {
1743 IWL_DEBUG_RATE(mvm,
1744 "Sanity check failed. Maintain rate\n");
1745 action = RS_ACTION_STAY;
1746 }
1747
1748 return action;
1749 }
1750
1751 static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
1752 int *weaker, int *stronger)
1753 {
1754 *weaker = index + TPC_TX_POWER_STEP;
1755 if (*weaker > TPC_MAX_REDUCTION)
1756 *weaker = TPC_INVALID;
1757
1758 *stronger = index - TPC_TX_POWER_STEP;
1759 if (*stronger < 0)
1760 *stronger = TPC_INVALID;
1761 }
1762
1763 static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate,
1764 enum ieee80211_band band)
1765 {
1766 int index = rate->index;
1767
1768 /*
1769 * Allow TPC only if power management is enabled, or the BT coex
1770 * activity grade allows it and we are on 2.4 GHz.
1771 */
1772 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM &&
1773 !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
1774 return false;
1775
1776 IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
1777 if (is_legacy(rate))
1778 return index == IWL_RATE_54M_INDEX;
1779 if (is_ht(rate))
1780 return index == IWL_RATE_MCS_7_INDEX;
1781 if (is_vht(rate))
1782 return index == IWL_RATE_MCS_7_INDEX ||
1783 index == IWL_RATE_MCS_8_INDEX ||
1784 index == IWL_RATE_MCS_9_INDEX;
1785
1786 WARN_ON_ONCE(1);
1787 return false;
1788 }
1789
1790 enum tpc_action {
1791 TPC_ACTION_STAY,
1792 TPC_ACTION_DECREASE,
1793 TPC_ACTION_INCREASE,
1794 TPC_ACTION_NO_RESTIRCTION,
1795 };
1796
1797 static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
1798 s32 sr, int weak, int strong,
1799 int current_tpt,
1800 int weak_tpt, int strong_tpt)
1801 {
1802 /* stay until we have valid tpt */
1803 if (current_tpt == IWL_INVALID_VALUE) {
1804 IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
1805 return TPC_ACTION_STAY;
1806 }
1807
1808 /* Too many failures, increase txp */
1809 if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
1810 IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
1811 return TPC_ACTION_NO_RESTIRCTION;
1812 }
1813
1814 /* try decreasing first if applicable */
1815 if (weak != TPC_INVALID) {
1816 if (weak_tpt == IWL_INVALID_VALUE &&
1817 (strong_tpt == IWL_INVALID_VALUE ||
1818 current_tpt >= strong_tpt)) {
1819 IWL_DEBUG_RATE(mvm,
1820 "no weak txp measurement. decrease txp\n");
1821 return TPC_ACTION_DECREASE;
1822 }
1823
1824 if (weak_tpt > current_tpt) {
1825 IWL_DEBUG_RATE(mvm,
1826 "lower txp has better tpt. decrease txp\n");
1827 return TPC_ACTION_DECREASE;
1828 }
1829 }
1830
1831 /* next, increase if needed */
1832 if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
1833 if (weak_tpt == IWL_INVALID_VALUE &&
1834 strong_tpt != IWL_INVALID_VALUE &&
1835 current_tpt < strong_tpt) {
1836 IWL_DEBUG_RATE(mvm,
1837 "higher txp has better tpt. increase txp\n");
1838 return TPC_ACTION_INCREASE;
1839 }
1840
1841 if (weak_tpt < current_tpt &&
1842 (strong_tpt == IWL_INVALID_VALUE ||
1843 strong_tpt > current_tpt)) {
1844 IWL_DEBUG_RATE(mvm,
1845 "lower txp has worse tpt. increase txp\n");
1846 return TPC_ACTION_INCREASE;
1847 }
1848 }
1849
1850 IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
1851 return TPC_ACTION_STAY;
1852 }
1853
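/*
 * Run one TX power control decision for this station and update
 * lq.reduced_tpc accordingly. The return value tells the caller
 * whether the LQ command should be resent with the new reduction.
 */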
1854 static bool rs_tpc_perform(struct iwl_mvm *mvm,
1855 struct ieee80211_sta *sta,
1856 struct iwl_lq_sta *lq_sta,
1857 struct iwl_scale_tbl_info *tbl)
1858 {
1859 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
1860 struct ieee80211_vif *vif = mvm_sta->vif;
1861 struct ieee80211_chanctx_conf *chanctx_conf;
1862 enum ieee80211_band band;
1863 struct iwl_rate_scale_data *window;
1864 struct rs_rate *rate = &tbl->rate;
1865 enum tpc_action action;
1866 s32 sr;
1867 u8 cur = lq_sta->lq.reduced_tpc;
1868 int current_tpt;
1869 int weak, strong;
1870 int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
1871
1872 #ifdef CONFIG_MAC80211_DEBUGFS
1873 if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
1874 IWL_DEBUG_RATE(mvm, "fixed tpc: %d",
1875 lq_sta->dbg_fixed_txp_reduction);
1876 lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
1877 return cur != lq_sta->dbg_fixed_txp_reduction;
1878 }
1879 #endif
1880
1881 rcu_read_lock();
1882 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1883 if (WARN_ON(!chanctx_conf))
1884 band = IEEE80211_NUM_BANDS;
1885 else
1886 band = chanctx_conf->def.chan->band;
1887 rcu_read_unlock();
1888
1889 if (!rs_tpc_allowed(mvm, rate, band)) {
1890 IWL_DEBUG_RATE(mvm,
1891 "tpc is not allowed. remove txp restrictions");
1892 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
1893 return cur != TPC_NO_REDUCTION;
1894 }
1895
1896 rs_get_adjacent_txp(mvm, cur, &weak, &strong);
1897
1898 /* Collect measured throughputs for current and adjacent rates */
1899 window = tbl->tpc_win;
1900 sr = window[cur].success_ratio;
1901 current_tpt = window[cur].average_tpt;
1902 if (weak != TPC_INVALID)
1903 weak_tpt = window[weak].average_tpt;
1904 if (strong != TPC_INVALID)
1905 strong_tpt = window[strong].average_tpt;
1906
1907 IWL_DEBUG_RATE(mvm,
1908 "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
1909 cur, current_tpt, sr, weak, strong,
1910 weak_tpt, strong_tpt);
1911
1912 action = rs_get_tpc_action(mvm, sr, weak, strong,
1913 current_tpt, weak_tpt, strong_tpt);
1914
1915 /* override actions if we are on the edge */
1916 if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
1917 IWL_DEBUG_RATE(mvm, "already in lowest txp, stay");
1918 action = TPC_ACTION_STAY;
1919 } else if (strong == TPC_INVALID &&
1920 (action == TPC_ACTION_INCREASE ||
1921 action == TPC_ACTION_NO_RESTRICTION)) {
1922 IWL_DEBUG_RATE(mvm, "already in highest txp, stay");
1923 action = TPC_ACTION_STAY;
1924 }
1925
1926 switch (action) {
1927 case TPC_ACTION_DECREASE:
1928 lq_sta->lq.reduced_tpc = weak;
1929 return true;
1930 case TPC_ACTION_INCREASE:
1931 lq_sta->lq.reduced_tpc = strong;
1932 return true;
1933 case TPC_ACTION_NO_RESTRICTION:
1934 lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
1935 return true;
1936 case TPC_ACTION_STAY:
1937 /* do nothing */
1938 break;
1939 }
1940 return false;
1941 }
1942
1943 /*
1944 * Do rate scaling and search for new modulation mode.
1945 */
1946 static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1947 struct sk_buff *skb,
1948 struct ieee80211_sta *sta,
1949 struct iwl_lq_sta *lq_sta)
1950 {
1951 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1952 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1953 int low = IWL_RATE_INVALID;
1954 int high = IWL_RATE_INVALID;
1955 int index;
1956 struct iwl_rate_scale_data *window = NULL;
1957 int current_tpt = IWL_INVALID_VALUE;
1958 int low_tpt = IWL_INVALID_VALUE;
1959 int high_tpt = IWL_INVALID_VALUE;
1960 u32 fail_count;
1961 enum rs_action scale_action = RS_ACTION_STAY;
1962 u16 rate_mask;
1963 u8 update_lq = 0;
1964 struct iwl_scale_tbl_info *tbl, *tbl1;
1965 u8 active_tbl = 0;
1966 u8 done_search = 0;
1967 u16 high_low;
1968 s32 sr;
1969 u8 tid = IWL_MAX_TID_COUNT;
1970 u8 prev_agg = lq_sta->is_agg;
1971 struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
1972 struct iwl_mvm_tid_data *tid_data;
1973 struct rs_rate *rate;
1974
1975 /* Send management frames and NO_ACK data using lowest rate. */
1976 /* TODO: this could probably be improved.. */
1977 if (!ieee80211_is_data(hdr->frame_control) ||
1978 info->flags & IEEE80211_TX_CTL_NO_ACK)
1979 return;
1980
1981 tid = rs_get_tid(lq_sta, hdr);
1982 if ((tid != IWL_MAX_TID_COUNT) &&
1983 (lq_sta->tx_agg_tid_en & (1 << tid))) {
1984 tid_data = &sta_priv->tid_data[tid];
1985 if (tid_data->state == IWL_AGG_OFF)
1986 lq_sta->is_agg = 0;
1987 else
1988 lq_sta->is_agg = 1;
1989 } else {
1990 lq_sta->is_agg = 0;
1991 }
1992
1993 /*
1994 * Select rate-scale / modulation-mode table to work with in
1995 * the rest of this function: "search" if searching for better
1996 * modulation mode, or "active" if doing rate scaling within a mode.
1997 */
1998 if (!lq_sta->search_better_tbl)
1999 active_tbl = lq_sta->active_tbl;
2000 else
2001 active_tbl = 1 - lq_sta->active_tbl;
2002
2003 tbl = &(lq_sta->lq_info[active_tbl]);
2004 rate = &tbl->rate;
2005
2006 if (prev_agg != lq_sta->is_agg) {
2007 IWL_DEBUG_RATE(mvm,
2008 "Aggregation changed: prev %d current %d. Update expected TPT table\n",
2009 prev_agg, lq_sta->is_agg);
2010 rs_set_expected_tpt_table(lq_sta, tbl);
2011 }
2012
2013 /* current tx rate */
2014 index = lq_sta->last_txrate_idx;
2015
2016 /* rates available for this association, and for modulation mode */
2017 rate_mask = rs_get_supported_rates(lq_sta, rate);
2018
2019 if (!(BIT(index) & rate_mask)) {
2020 IWL_ERR(mvm, "Current Rate is not valid\n");
2021 if (lq_sta->search_better_tbl) {
2022 /* revert to active table if search table is not valid*/
2023 rate->type = LQ_NONE;
2024 lq_sta->search_better_tbl = 0;
2025 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2026 rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
2027 }
2028 return;
2029 }
2030
2031 /* Get expected throughput table and history window for current rate */
2032 if (!tbl->expected_tpt) {
2033 IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
2034 return;
2035 }
2036
2037 /* force user max rate if set by user */
2038 if ((lq_sta->max_rate_idx != -1) &&
2039 (lq_sta->max_rate_idx < index)) {
2040 index = lq_sta->max_rate_idx;
2041 update_lq = 1;
2042 window = &(tbl->win[index]);
2043 IWL_DEBUG_RATE(mvm,
2044 "Forcing user max rate %d\n",
2045 index);
2046 goto lq_update;
2047 }
2048
2049 window = &(tbl->win[index]);
2050
2051 /*
2052 * If there is not enough history to calculate actual average
2053 * throughput, keep analyzing results of more tx frames, without
2054 * changing rate or mode (bypass most of the rest of this function).
2055 * Set up new rate table in uCode only if old rate is not supported
2056 * in current association (use new rate found above).
2057 */
2058 fail_count = window->counter - window->success_counter;
2059 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
2060 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
2061 IWL_DEBUG_RATE(mvm,
2062 "(%s: %d): Test Window: succ %d total %d\n",
2063 rs_pretty_lq_type(rate->type),
2064 index, window->success_counter, window->counter);
2065
2066 /* Can't calculate this yet; not enough history */
2067 window->average_tpt = IWL_INVALID_VALUE;
2068
2069 /* Should we stay with this modulation mode,
2070 * or search for a new one? */
2071 rs_stay_in_table(lq_sta, false);
2072
2073 goto out;
2074 }
2075 /* Else we have enough samples; calculate estimate of
2076 * actual average throughput */
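/*
 * Worked example, assuming success_ratio is the success percentage
 * scaled by 128 (consistent with the /128 here and with the
 * "100 * tbl->expected_tpt[low]" comparison in the sanity check of
 * rs_get_rate_action): SR = 6400 (50%) and expected_tpt = 100 give
 * (6400 * 100 + 64) / 128 = 5000, i.e. 50% of the expected throughput
 * in "percent * expected_tpt" units; the +64 is for rounding.
 */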
2077 if (window->average_tpt != ((window->success_ratio *
2078 tbl->expected_tpt[index] + 64) / 128)) {
2079 window->average_tpt = ((window->success_ratio *
2080 tbl->expected_tpt[index] + 64) / 128);
2081 }
2082
2083 /* If we are searching for better modulation mode, check success. */
2084 if (lq_sta->search_better_tbl) {
2085 /* If good success, continue using the "search" mode;
2086 * no need to send new link quality command, since we're
2087 * continuing to use the setup that we've been trying. */
2088 if (window->average_tpt > lq_sta->last_tpt) {
2089 IWL_DEBUG_RATE(mvm,
2090 "SWITCHING TO NEW TABLE SR: %d "
2091 "cur-tpt %d old-tpt %d\n",
2092 window->success_ratio,
2093 window->average_tpt,
2094 lq_sta->last_tpt);
2095
2096 /* Swap tables; "search" becomes "active" */
2097 lq_sta->active_tbl = active_tbl;
2098 current_tpt = window->average_tpt;
2099 /* Else poor success; go back to mode in "active" table */
2100 } else {
2101 IWL_DEBUG_RATE(mvm,
2102 "GOING BACK TO THE OLD TABLE: SR %d "
2103 "cur-tpt %d old-tpt %d\n",
2104 window->success_ratio,
2105 window->average_tpt,
2106 lq_sta->last_tpt);
2107
2108 /* Nullify "search" table */
2109 rate->type = LQ_NONE;
2110
2111 /* Revert to "active" table */
2112 active_tbl = lq_sta->active_tbl;
2113 tbl = &(lq_sta->lq_info[active_tbl]);
2114
2115 /* Revert to "active" rate and throughput info */
2116 index = tbl->rate.index;
2117 current_tpt = lq_sta->last_tpt;
2118
2119 /* Need to set up a new rate table in uCode */
2120 update_lq = 1;
2121 }
2122
2123 /* Either way, we've made a decision; modulation mode
2124 * search is done, allow rate adjustment next time. */
2125 lq_sta->search_better_tbl = 0;
2126 done_search = 1; /* Don't switch modes below! */
2127 goto lq_update;
2128 }
2129
2130 /* (Else) not in search of better modulation mode, try for better
2131 * starting rate, while staying in this mode. */
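/*
 * rs_get_adjacent_rate() presumably packs the two neighbouring rate
 * indices as (high << 8) | low, which is how they are unpacked below;
 * IWL_RATE_INVALID in either byte means that neighbour doesn't exist.
 */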
2132 high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
2133 low = high_low & 0xff;
2134 high = (high_low >> 8) & 0xff;
2135
2136 /* If the user set a max rate, don't allow a rate higher than that constraint */
2137 if ((lq_sta->max_rate_idx != -1) &&
2138 (lq_sta->max_rate_idx < high))
2139 high = IWL_RATE_INVALID;
2140
2141 sr = window->success_ratio;
2142
2143 /* Collect measured throughputs for current and adjacent rates */
2144 current_tpt = window->average_tpt;
2145 if (low != IWL_RATE_INVALID)
2146 low_tpt = tbl->win[low].average_tpt;
2147 if (high != IWL_RATE_INVALID)
2148 high_tpt = tbl->win[high].average_tpt;
2149
2150 IWL_DEBUG_RATE(mvm,
2151 "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
2152 rs_pretty_lq_type(rate->type), index, current_tpt, sr,
2153 low, high, low_tpt, high_tpt);
2154
2155 scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
2156 current_tpt, low_tpt, high_tpt);
2157
2158 /* Force a search in case BT doesn't like us being in MIMO */
2159 if (is_mimo(rate) &&
2160 !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
2161 IWL_DEBUG_RATE(mvm,
2162 "BT Coex forbids MIMO. Search for new config\n");
2163 rs_stay_in_table(lq_sta, true);
2164 goto lq_update;
2165 }
2166
2167 switch (scale_action) {
2168 case RS_ACTION_DOWNSCALE:
2169 /* Decrease starting rate, update uCode's rate table */
2170 if (low != IWL_RATE_INVALID) {
2171 update_lq = 1;
2172 index = low;
2173 } else {
2174 IWL_DEBUG_RATE(mvm,
2175 "At the bottom rate. Can't decrease\n");
2176 }
2177
2178 break;
2179 case RS_ACTION_UPSCALE:
2180 /* Increase starting rate, update uCode's rate table */
2181 if (high != IWL_RATE_INVALID) {
2182 update_lq = 1;
2183 index = high;
2184 } else {
2185 IWL_DEBUG_RATE(mvm,
2186 "At the top rate. Can't increase\n");
2187 }
2188
2189 break;
2190 case RS_ACTION_STAY:
2191 /* No change */
2192 update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
2193 break;
2194 default:
2195 break;
2196 }
2197
2198 lq_update:
2199 /* Replace uCode's rate table for the destination station. */
2200 if (update_lq) {
2201 tbl->rate.index = index;
2202 rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
2203 }
2204
2205 rs_stay_in_table(lq_sta, false);
2206
2207 /*
2208 * Search for new modulation mode if we're:
2209 * 1) Not changing rates right now
2210 * 2) Not just finishing up a search
2211 * 3) Allowing a new search
2212 */
2213 if (!update_lq && !done_search &&
2214 lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
2215 && window->counter) {
2216 enum rs_column next_column;
2217
2218 /* Save current throughput to compare with "search" throughput*/
2219 lq_sta->last_tpt = current_tpt;
2220
2221 IWL_DEBUG_RATE(mvm,
2222 "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
2223 update_lq, done_search, lq_sta->rs_state,
2224 window->counter);
2225
2226 next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
2227 if (next_column != RS_COLUMN_INVALID) {
2228 int ret = rs_switch_to_column(mvm, lq_sta, sta,
2229 next_column);
2230 if (!ret)
2231 lq_sta->search_better_tbl = 1;
2232 } else {
2233 IWL_DEBUG_RATE(mvm,
2234 "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
2235 lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
2236 }
2237
2238 /* If new "search" mode was selected, set up in uCode table */
2239 if (lq_sta->search_better_tbl) {
2240 /* Access the "search" table, clear its history. */
2241 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2242 rs_rate_scale_clear_tbl_windows(tbl);
2243
2244 /* Use new "search" start rate */
2245 index = tbl->rate.index;
2246
2247 rs_dump_rate(mvm, &tbl->rate,
2248 "Switch to SEARCH TABLE:");
2249 rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
2250 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
2251 } else {
2252 done_search = 1;
2253 }
2254 }
2255
2256 if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
2257 /* If the "active" (non-search) mode was legacy,
2258 * and we've tried switching antennas,
2259 * but we haven't been able to try HT modes (not available),
2260 * stay with best antenna legacy modulation for a while
2261 * before next round of mode comparisons. */
2262 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2263 if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) {
2264 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
2265 rs_set_stay_in_table(mvm, 1, lq_sta);
2266 } else {
2267 /* If we're in an HT mode, and all 3 mode switch actions
2268 * have been tried and compared, stay in this best modulation
2269 * mode for a while before next round of mode comparisons. */
2270 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2271 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2272 (tid != IWL_MAX_TID_COUNT)) {
2273 tid_data = &sta_priv->tid_data[tid];
2274 if (tid_data->state == IWL_AGG_OFF) {
2275 IWL_DEBUG_RATE(mvm,
2276 "try to aggregate tid %d\n",
2277 tid);
2278 rs_tl_turn_on_agg(mvm, tid,
2279 lq_sta, sta);
2280 }
2281 }
2282 rs_set_stay_in_table(mvm, 0, lq_sta);
2283 }
2284 }
2285
2286 out:
2287 lq_sta->last_txrate_idx = index;
2288 }
2289
2290 /**
2291 * rs_initialize_lq - Initialize a station's hardware rate table
2292 *
2293 * The uCode's station table contains a table of fallback rates
2294 * for automatic fallback during transmission.
2295 *
2296 * NOTE: This sets up a default set of values. These will be replaced later
2297 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2298 * rc80211_simple.
2299 *
2300 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2301 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2302 * which requires station table entry to exist).
2303 */
2304 static void rs_initialize_lq(struct iwl_mvm *mvm,
2305 struct ieee80211_sta *sta,
2306 struct iwl_lq_sta *lq_sta,
2307 enum ieee80211_band band,
2308 bool init)
2309 {
2310 struct iwl_scale_tbl_info *tbl;
2311 struct rs_rate *rate;
2312 int i;
2313 u8 active_tbl = 0;
2314 u8 valid_tx_ant;
2315
2316 if (!sta || !lq_sta)
2317 return;
2318
2319 i = lq_sta->last_txrate_idx;
2320
2321 valid_tx_ant = mvm->fw->valid_tx_ant;
2322
2323 if (!lq_sta->search_better_tbl)
2324 active_tbl = lq_sta->active_tbl;
2325 else
2326 active_tbl = 1 - lq_sta->active_tbl;
2327
2328 tbl = &(lq_sta->lq_info[active_tbl]);
2329 rate = &tbl->rate;
2330
2331 if ((i < 0) || (i >= IWL_RATE_COUNT))
2332 i = 0;
2333
2334 rate->index = i;
2335 rate->ant = first_antenna(valid_tx_ant);
2336 rate->sgi = false;
2337 rate->bw = RATE_MCS_CHAN_WIDTH_20;
2338 if (band == IEEE80211_BAND_5GHZ)
2339 rate->type = LQ_LEGACY_A;
2340 else
2341 rate->type = LQ_LEGACY_G;
2342
2343 WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
2344 if (rate->ant == ANT_A)
2345 tbl->column = RS_COLUMN_LEGACY_ANT_A;
2346 else
2347 tbl->column = RS_COLUMN_LEGACY_ANT_B;
2348
2349 rs_set_expected_tpt_table(lq_sta, tbl);
2350 rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
2351 /* TODO restore station should remember the lq cmd */
2352 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
2353 }
2354
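/*
 * mac80211 .get_rate hook. The actual rate decisions are made in
 * rs_rate_scale_perform() and pushed to the firmware via the LQ rate
 * table, so this hook mostly reports the last used rate back to
 * mac80211 and lets rate_control_send_low() handle frames that should
 * go out at the lowest rate.
 */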
2355 static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2356 struct ieee80211_tx_rate_control *txrc)
2357 {
2358 struct sk_buff *skb = txrc->skb;
2359 struct ieee80211_supported_band *sband = txrc->sband;
2360 struct iwl_op_mode *op_mode __maybe_unused =
2361 (struct iwl_op_mode *)mvm_r;
2362 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
2363 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2364 struct iwl_lq_sta *lq_sta = mvm_sta;
2365
2366 /* Get max rate if user set max rate */
2367 if (lq_sta) {
2368 lq_sta->max_rate_idx = txrc->max_rate_idx;
2369 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2370 (lq_sta->max_rate_idx != -1))
2371 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
2372 if ((lq_sta->max_rate_idx < 0) ||
2373 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2374 lq_sta->max_rate_idx = -1;
2375 }
2376
2377 /* Treat uninitialized rate scaling data same as non-existing. */
2378 if (lq_sta && !lq_sta->drv) {
2379 IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
2380 mvm_sta = NULL;
2381 }
2382
2383 /* Send management frames and NO_ACK data using lowest rate. */
2384 if (rate_control_send_low(sta, mvm_sta, txrc))
2385 return;
2386
2387 iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
2388 info->band, &info->control.rates[0]);
2389
2390 info->control.rates[0].count = 1;
2391 }
2392
2393 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
2394 gfp_t gfp)
2395 {
2396 struct iwl_mvm_sta *sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
2397 struct iwl_op_mode *op_mode __maybe_unused =
2398 (struct iwl_op_mode *)mvm_rate;
2399 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
2400
2401 IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
2402
2403 return &sta_priv->lq_sta;
2404 }
2405
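/*
 * The VHT RX MCS map holds two bits per spatial stream, so mask and
 * shift by 2 * (nss - 1) to extract this NSS's support value
 * (MCS 0-7, 0-8, 0-9 or not supported).
 */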
2406 static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
2407 int nss)
2408 {
2409 u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
2410 (0x3 << (2 * (nss - 1)));
2411 rx_mcs >>= (2 * (nss - 1));
2412
2413 if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
2414 return IWL_RATE_MCS_7_INDEX;
2415 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
2416 return IWL_RATE_MCS_8_INDEX;
2417 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
2418 return IWL_RATE_MCS_9_INDEX;
2419
2420 WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
2421 return -1;
2422 }
2423
2424 static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2425 struct ieee80211_sta_vht_cap *vht_cap,
2426 struct iwl_lq_sta *lq_sta)
2427 {
2428 int i;
2429 int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
2430
2431 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2432 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2433 if (i == IWL_RATE_9M_INDEX)
2434 continue;
2435
2436 /* Disable MCS9 as a workaround */
2437 if (i == IWL_RATE_MCS_9_INDEX)
2438 continue;
2439
2440 /* VHT MCS9 isn't valid for 20MHz for NSS=1,2 */
2441 if (i == IWL_RATE_MCS_9_INDEX &&
2442 sta->bandwidth == IEEE80211_STA_RX_BW_20)
2443 continue;
2444
2445 lq_sta->active_siso_rate |= BIT(i);
2446 }
2447 }
2448
2449 if (sta->rx_nss < 2)
2450 return;
2451
2452 highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
2453 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2454 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2455 if (i == IWL_RATE_9M_INDEX)
2456 continue;
2457
2458 /* Disable MCS9 as a workaround */
2459 if (i == IWL_RATE_MCS_9_INDEX)
2460 continue;
2461
2462 /* VHT MCS9 isn't valid for 20MHz for NSS=1,2 */
2463 if (i == IWL_RATE_MCS_9_INDEX &&
2464 sta->bandwidth == IEEE80211_STA_RX_BW_20)
2465 continue;
2466
2467 lq_sta->active_mimo2_rate |= BIT(i);
2468 }
2469 }
2470 }
2471
2472 #ifdef CONFIG_IWLWIFI_DEBUGFS
2473 static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm,
2474 struct iwl_mvm_frame_stats *stats)
2475 {
2476 spin_lock_bh(&mvm->drv_stats_lock);
2477 memset(stats, 0, sizeof(*stats));
2478 spin_unlock_bh(&mvm->drv_stats_lock);
2479 }
2480
2481 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
2482 struct iwl_mvm_frame_stats *stats,
2483 u32 rate, bool agg)
2484 {
2485 u8 nss = 0, mcs = 0;
2486
2487 spin_lock(&mvm->drv_stats_lock);
2488
2489 if (agg)
2490 stats->agg_frames++;
2491
2492 stats->success_frames++;
2493
2494 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
2495 case RATE_MCS_CHAN_WIDTH_20:
2496 stats->bw_20_frames++;
2497 break;
2498 case RATE_MCS_CHAN_WIDTH_40:
2499 stats->bw_40_frames++;
2500 break;
2501 case RATE_MCS_CHAN_WIDTH_80:
2502 stats->bw_80_frames++;
2503 break;
2504 default:
2505 WARN_ONCE(1, "bad BW. rate 0x%x", rate);
2506 }
2507
2508 if (rate & RATE_MCS_HT_MSK) {
2509 stats->ht_frames++;
2510 mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
2511 nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
2512 } else if (rate & RATE_MCS_VHT_MSK) {
2513 stats->vht_frames++;
2514 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
2515 nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
2516 RATE_VHT_MCS_NSS_POS) + 1;
2517 } else {
2518 stats->legacy_frames++;
2519 }
2520
2521 if (nss == 1)
2522 stats->siso_frames++;
2523 else if (nss == 2)
2524 stats->mimo2_frames++;
2525
2526 if (rate & RATE_MCS_SGI_MSK)
2527 stats->sgi_frames++;
2528 else
2529 stats->ngi_frames++;
2530
2531 stats->last_rates[stats->last_frame_idx] = rate;
2532 stats->last_frame_idx = (stats->last_frame_idx + 1) %
2533 ARRAY_SIZE(stats->last_rates);
2534
2535 spin_unlock(&mvm->drv_stats_lock);
2536 }
2537 #endif
2538
2539 /*
2540 * Called after adding a new station to initialize rate scaling
2541 */
2542 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2543 enum ieee80211_band band, bool init)
2544 {
2545 int i, j;
2546 struct ieee80211_hw *hw = mvm->hw;
2547 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2548 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2549 struct iwl_mvm_sta *sta_priv;
2550 struct iwl_lq_sta *lq_sta;
2551 struct ieee80211_supported_band *sband;
2552 unsigned long supp; /* must be unsigned long for for_each_set_bit */
2553
2554 sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
2555 lq_sta = &sta_priv->lq_sta;
2556 memset(lq_sta, 0, sizeof(*lq_sta));
2557
2558 sband = hw->wiphy->bands[band];
2559
2560 lq_sta->lq.sta_id = sta_priv->sta_id;
2561
2562 for (j = 0; j < LQ_SIZE; j++)
2563 rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]);
2564
2565 lq_sta->flush_timer = 0;
2566
2567 IWL_DEBUG_RATE(mvm,
2568 "LQ: *** rate scale station global init for station %d ***\n",
2569 sta_priv->sta_id);
2570 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2571 * the lowest or the highest rate.. Could consider using RSSI from
2572 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2573 * after assoc.. */
2574
2575 lq_sta->max_rate_idx = -1;
2576 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2577 lq_sta->band = sband->band;
2578 /*
2579 * active legacy rates as per supported rates bitmap
2580 */
2581 supp = sta->supp_rates[sband->band];
2582 lq_sta->active_legacy_rate = 0;
2583 for_each_set_bit(i, &supp, BITS_PER_LONG)
2584 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
2585
2586 /* TODO: should probably account for rx_highest for both HT/VHT */
2587 if (!vht_cap || !vht_cap->vht_supported) {
2588 /* active_siso_rate mask includes 9 MBits (bit 5),
2589 * and CCK (bits 0-3), supp_rates[] does not;
2590 * shift to convert format, force 9 MBits off.
2591 */
2592 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2593 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2594 lq_sta->active_siso_rate &= ~((u16)0x2);
2595 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2596
2597 /* Same here */
2598 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2599 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2600 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2601 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2602
2603 lq_sta->is_vht = false;
2604 } else {
2605 rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
2606 lq_sta->is_vht = true;
2607 }
2608
2609 IWL_DEBUG_RATE(mvm,
2610 "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
2611 lq_sta->active_siso_rate,
2612 lq_sta->active_mimo2_rate,
2613 lq_sta->is_vht);
2614
2615 /* These values will be overridden later */
2616 lq_sta->lq.single_stream_ant_msk =
2617 first_antenna(mvm->fw->valid_tx_ant);
2618 lq_sta->lq.dual_stream_ant_msk = ANT_AB;
2619
2620 /* by default, allow aggregation for all TIDs */
2621 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2622 lq_sta->drv = mvm;
2623
2624 /* Set last_txrate_idx to lowest rate */
2625 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2626 if (sband->band == IEEE80211_BAND_5GHZ)
2627 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2628 lq_sta->is_agg = 0;
2629 #ifdef CONFIG_MAC80211_DEBUGFS
2630 lq_sta->dbg_fixed_rate = 0;
2631 lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
2632 #endif
2633 #ifdef CONFIG_IWLWIFI_DEBUGFS
2634 iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
2635 #endif
2636 rs_initialize_lq(mvm, sta, lq_sta, band, init);
2637 }
2638
2639 static void rs_rate_update(void *mvm_r,
2640 struct ieee80211_supported_band *sband,
2641 struct cfg80211_chan_def *chandef,
2642 struct ieee80211_sta *sta, void *priv_sta,
2643 u32 changed)
2644 {
2645 u8 tid;
2646 struct iwl_op_mode *op_mode =
2647 (struct iwl_op_mode *)mvm_r;
2648 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2649
2650 /* Stop any ongoing aggregations as rs starts off assuming no agg */
2651 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
2652 ieee80211_stop_tx_ba_session(sta, tid);
2653
2654 iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
2655 }
2656
2657 #ifdef CONFIG_MAC80211_DEBUGFS
2658 static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
2659 struct iwl_lq_cmd *lq_cmd,
2660 enum ieee80211_band band,
2661 u32 ucode_rate)
2662 {
2663 struct rs_rate rate;
2664 int i;
2665 int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
2666 __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
2667
2668 for (i = 0; i < num_rates; i++)
2669 lq_cmd->rs_table[i] = ucode_rate_le32;
2670
2671 rs_rate_from_ucode_rate(ucode_rate, band, &rate);
2672
2673 if (is_mimo(&rate))
2674 lq_cmd->mimo_delim = num_rates - 1;
2675 else
2676 lq_cmd->mimo_delim = 0;
2677 }
2678 #endif /* CONFIG_MAC80211_DEBUGFS */
2679
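/*
 * Fill up to num_rates rates from the current column into rs_table,
 * repeating each rate num_retries times and stepping down one rate per
 * iteration (toggling the TX antenna when requested). The loop stops
 * early when a non-legacy column runs out of lower rates; otherwise
 * the rate index is restored to the last entry actually written.
 * *rs_table_index is advanced so the caller can append the next block.
 */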
2680 static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
2681 struct iwl_lq_sta *lq_sta,
2682 struct rs_rate *rate,
2683 __le32 *rs_table, int *rs_table_index,
2684 int num_rates, int num_retries,
2685 u8 valid_tx_ant, bool toggle_ant)
2686 {
2687 int i, j;
2688 __le32 ucode_rate;
2689 bool bottom_reached = false;
2690 int prev_rate_idx = rate->index;
2691 int end = LINK_QUAL_MAX_RETRY_NUM;
2692 int index = *rs_table_index;
2693
2694 for (i = 0; i < num_rates && index < end; i++) {
2695 ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate));
2696 for (j = 0; j < num_retries && index < end; j++, index++)
2697 rs_table[index] = ucode_rate;
2698
2699 if (toggle_ant)
2700 rs_toggle_antenna(valid_tx_ant, rate);
2701
2702 prev_rate_idx = rate->index;
2703 bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
2704 if (bottom_reached && !is_legacy(rate))
2705 break;
2706 }
2707
2708 if (!bottom_reached)
2709 rate->index = prev_rate_idx;
2710
2711 *rs_table_index = index;
2712 }
2713
2714 /* Building the rate table is non-trivial. When we're in a MIMO2/VHT/80MHz/SGI
2715 * column, the rate table should look like this:
2716 *
2717 * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
2718 * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
2719 * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
2720 * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
2721 * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
2722 * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
2723 * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
2724 * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
2725 * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
2726 * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
2727 * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
2728 * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
2729 * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
2730 * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
2731 * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
2732 * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
2733 */
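/*
 * In other words, the table below is filled in three passes: the
 * initial rate's own column, then a secondary lower column (SISO or
 * legacy), and finally plain legacy rates as the last resort.
 */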
2734 static void rs_build_rates_table(struct iwl_mvm *mvm,
2735 struct iwl_lq_sta *lq_sta,
2736 const struct rs_rate *initial_rate)
2737 {
2738 struct rs_rate rate;
2739 int num_rates, num_retries, index = 0;
2740 u8 valid_tx_ant = 0;
2741 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
2742 bool toggle_ant = false;
2743
2744 memcpy(&rate, initial_rate, sizeof(rate));
2745
2746 valid_tx_ant = mvm->fw->valid_tx_ant;
2747
2748 if (is_siso(&rate)) {
2749 num_rates = RS_INITIAL_SISO_NUM_RATES;
2750 num_retries = RS_HT_VHT_RETRIES_PER_RATE;
2751 } else if (is_mimo(&rate)) {
2752 num_rates = RS_INITIAL_MIMO_NUM_RATES;
2753 num_retries = RS_HT_VHT_RETRIES_PER_RATE;
2754 } else {
2755 num_rates = RS_INITIAL_LEGACY_NUM_RATES;
2756 num_retries = RS_LEGACY_RETRIES_PER_RATE;
2757 toggle_ant = true;
2758 }
2759
2760 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
2761 num_rates, num_retries, valid_tx_ant,
2762 toggle_ant);
2763
2764 rs_get_lower_rate_down_column(lq_sta, &rate);
2765
2766 if (is_siso(&rate)) {
2767 num_rates = RS_SECONDARY_SISO_NUM_RATES;
2768 num_retries = RS_SECONDARY_SISO_RETRIES;
2769 } else if (is_legacy(&rate)) {
2770 num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
2771 num_retries = RS_LEGACY_RETRIES_PER_RATE;
2772 } else {
2773 WARN_ON_ONCE(1);
2774 }
2775
2776 toggle_ant = true;
2777
2778 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
2779 num_rates, num_retries, valid_tx_ant,
2780 toggle_ant);
2781
2782 rs_get_lower_rate_down_column(lq_sta, &rate);
2783
2784 num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
2785 num_retries = RS_LEGACY_RETRIES_PER_RATE;
2786
2787 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
2788 num_rates, num_retries, valid_tx_ant,
2789 toggle_ant);
2790
2791 }
2792
2793 static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
2794 struct ieee80211_sta *sta,
2795 struct iwl_lq_sta *lq_sta,
2796 const struct rs_rate *initial_rate)
2797 {
2798 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
2799 u8 ant = initial_rate->ant;
2800
2801 #ifdef CONFIG_MAC80211_DEBUGFS
2802 if (lq_sta->dbg_fixed_rate) {
2803 rs_build_rates_table_from_fixed(mvm, lq_cmd,
2804 lq_sta->band,
2805 lq_sta->dbg_fixed_rate);
2806 lq_cmd->reduced_tpc = 0;
2807 ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
2808 RATE_MCS_ANT_POS;
2809 } else
2810 #endif
2811 rs_build_rates_table(mvm, lq_sta, initial_rate);
2812
2813 if (num_of_ant(ant) == 1)
2814 lq_cmd->single_stream_ant_msk = ant;
2815
2816 lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2817 lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2818
2819 lq_cmd->agg_time_limit =
2820 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2821
2822 if (sta)
2823 lq_cmd->agg_time_limit =
2824 cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
2825 }
2826
2827 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2828 {
2829 return hw->priv;
2830 }
2831 /* the rate scaling API requires a free function to be implemented */
2832 static void rs_free(void *mvm_rate)
2833 {
2834 return;
2835 }
2836
2837 static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
2838 void *mvm_sta)
2839 {
2840 struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
2841 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
2842
2843 IWL_DEBUG_RATE(mvm, "enter\n");
2844 IWL_DEBUG_RATE(mvm, "leave\n");
2845 }
2846
2847 #ifdef CONFIG_MAC80211_DEBUGFS
2848 int rs_pretty_print_rate(char *buf, const u32 rate)
2849 {
2850
2851 char *type, *bw;
2852 u8 mcs = 0, nss = 0;
2853 u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
2854
2855 if (!(rate & RATE_MCS_HT_MSK) &&
2856 !(rate & RATE_MCS_VHT_MSK)) {
2857 int index = iwl_hwrate_to_plcp_idx(rate);
2858
2859 return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
2860 rs_pretty_ant(ant),
2861 index == IWL_RATE_INVALID ? "BAD" :
2862 iwl_rate_mcs[index].mbps);
2863 }
2864
2865 if (rate & RATE_MCS_VHT_MSK) {
2866 type = "VHT";
2867 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
2868 nss = ((rate & RATE_VHT_MCS_NSS_MSK)
2869 >> RATE_VHT_MCS_NSS_POS) + 1;
2870 } else if (rate & RATE_MCS_HT_MSK) {
2871 type = "HT";
2872 mcs = rate & RATE_HT_MCS_INDEX_MSK;
2873 } else {
2874 type = "Unknown"; /* shouldn't happen */
2875 }
2876
2877 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
2878 case RATE_MCS_CHAN_WIDTH_20:
2879 bw = "20Mhz";
2880 break;
2881 case RATE_MCS_CHAN_WIDTH_40:
2882 bw = "40Mhz";
2883 break;
2884 case RATE_MCS_CHAN_WIDTH_80:
2885 bw = "80Mhz";
2886 break;
2887 case RATE_MCS_CHAN_WIDTH_160:
2888 bw = "160Mhz";
2889 break;
2890 default:
2891 bw = "BAD BW";
2892 }
2893
2894 return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
2895 type, rs_pretty_ant(ant), bw, mcs, nss,
2896 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
2897 (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
2898 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
2899 (rate & RATE_MCS_BF_MSK) ? "BF " : "",
2900 (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
2901 }
2902
2903 /**
2904 * Program the device to use a fixed rate for frame transmission.
2905 * This is for debugging/testing only.
2906 * Once the device starts using a fixed rate, we need to reload the module
2907 * to bring back normal operation.
2908 */
2909 static void rs_program_fix_rate(struct iwl_mvm *mvm,
2910 struct iwl_lq_sta *lq_sta)
2911 {
2912 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2913 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2914 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2915
2916 IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
2917 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2918
2919 if (lq_sta->dbg_fixed_rate) {
2920 struct rs_rate rate;
2921 rs_rate_from_ucode_rate(lq_sta->dbg_fixed_rate,
2922 lq_sta->band, &rate);
2923 rs_fill_lq_cmd(mvm, NULL, lq_sta, &rate);
2924 iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, false);
2925 }
2926 }
2927
2928 static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2929 const char __user *user_buf, size_t count, loff_t *ppos)
2930 {
2931 struct iwl_lq_sta *lq_sta = file->private_data;
2932 struct iwl_mvm *mvm;
2933 char buf[64];
2934 size_t buf_size;
2935 u32 parsed_rate;
2936
2937 mvm = lq_sta->drv;
2938 memset(buf, 0, sizeof(buf));
2939 buf_size = min(count, sizeof(buf) - 1);
2940 if (copy_from_user(buf, user_buf, buf_size))
2941 return -EFAULT;
2942
2943 if (sscanf(buf, "%x", &parsed_rate) == 1)
2944 lq_sta->dbg_fixed_rate = parsed_rate;
2945 else
2946 lq_sta->dbg_fixed_rate = 0;
2947
2948 rs_program_fix_rate(mvm, lq_sta);
2949
2950 return count;
2951 }
2952
2953 static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2954 char __user *user_buf, size_t count, loff_t *ppos)
2955 {
2956 char *buff;
2957 int desc = 0;
2958 int i = 0;
2959 ssize_t ret;
2960
2961 struct iwl_lq_sta *lq_sta = file->private_data;
2962 struct iwl_mvm *mvm;
2963 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2964 struct rs_rate *rate = &tbl->rate;
2965 mvm = lq_sta->drv;
2966 buff = kmalloc(2048, GFP_KERNEL);
2967 if (!buff)
2968 return -ENOMEM;
2969
2970 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2971 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2972 lq_sta->total_failed, lq_sta->total_success,
2973 lq_sta->active_legacy_rate);
2974 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2975 lq_sta->dbg_fixed_rate);
2976 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2977 (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "",
2978 (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "",
2979 (mvm->fw->valid_tx_ant & ANT_C) ? "ANT_C" : "");
2980 desc += sprintf(buff+desc, "lq type %s\n",
2981 (is_legacy(rate)) ? "legacy" :
2982 is_vht(rate) ? "VHT" : "HT");
2983 if (!is_legacy(rate)) {
2984 desc += sprintf(buff+desc, " %s",
2985 (is_siso(rate)) ? "SISO" : "MIMO2");
2986 desc += sprintf(buff+desc, " %s",
2987 (is_ht20(rate)) ? "20MHz" :
2988 (is_ht40(rate)) ? "40MHz" :
2989 (is_ht80(rate)) ? "80MHz" : "BAD BW");
2990 desc += sprintf(buff+desc, " %s %s\n",
2991 (rate->sgi) ? "SGI" : "NGI",
2992 (lq_sta->is_agg) ? "AGG on" : "");
2993 }
2994 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2995 lq_sta->last_rate_n_flags);
2996 desc += sprintf(buff+desc,
2997 "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
2998 lq_sta->lq.flags,
2999 lq_sta->lq.mimo_delim,
3000 lq_sta->lq.single_stream_ant_msk,
3001 lq_sta->lq.dual_stream_ant_msk);
3002
3003 desc += sprintf(buff+desc,
3004 "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
3005 le16_to_cpu(lq_sta->lq.agg_time_limit),
3006 lq_sta->lq.agg_disable_start_th,
3007 lq_sta->lq.agg_frame_cnt_limit);
3008
3009 desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
3010 desc += sprintf(buff+desc,
3011 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
3012 lq_sta->lq.initial_rate_index[0],
3013 lq_sta->lq.initial_rate_index[1],
3014 lq_sta->lq.initial_rate_index[2],
3015 lq_sta->lq.initial_rate_index[3]);
3016
3017 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3018 u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
3019
3020 desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
3021 desc += rs_pretty_print_rate(buff+desc, r);
3022 }
3023
3024 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3025 kfree(buff);
3026 return ret;
3027 }
3028
3029 static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
3030 .write = rs_sta_dbgfs_scale_table_write,
3031 .read = rs_sta_dbgfs_scale_table_read,
3032 .open = simple_open,
3033 .llseek = default_llseek,
3034 };
3035 static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
3036 char __user *user_buf, size_t count, loff_t *ppos)
3037 {
3038 char *buff;
3039 int desc = 0;
3040 int i, j;
3041 ssize_t ret;
3042 struct iwl_scale_tbl_info *tbl;
3043 struct rs_rate *rate;
3044 struct iwl_lq_sta *lq_sta = file->private_data;
3045
3046 buff = kmalloc(1024, GFP_KERNEL);
3047 if (!buff)
3048 return -ENOMEM;
3049
3050 for (i = 0; i < LQ_SIZE; i++) {
3051 tbl = &(lq_sta->lq_info[i]);
3052 rate = &tbl->rate;
3053 desc += sprintf(buff+desc,
3054 "%s type=%d SGI=%d BW=%s DUP=0\n"
3055 "index=%d\n",
3056 lq_sta->active_tbl == i ? "*" : "x",
3057 rate->type,
3058 rate->sgi,
3059 is_ht20(rate) ? "20Mhz" :
3060 is_ht40(rate) ? "40Mhz" :
3061 is_ht80(rate) ? "80Mhz" : "ERR",
3062 rate->index);
3063 for (j = 0; j < IWL_RATE_COUNT; j++) {
3064 desc += sprintf(buff+desc,
3065 "counter=%d success=%d %%=%d\n",
3066 tbl->win[j].counter,
3067 tbl->win[j].success_counter,
3068 tbl->win[j].success_ratio);
3069 }
3070 }
3071 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3072 kfree(buff);
3073 return ret;
3074 }
3075
3076 static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
3077 .read = rs_sta_dbgfs_stats_table_read,
3078 .open = simple_open,
3079 .llseek = default_llseek,
3080 };
3081
3082 static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
3083 char __user *user_buf,
3084 size_t count, loff_t *ppos)
3085 {
3086 static const char * const column_name[] = {
3087 [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
3088 [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
3089 [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
3090 [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
3091 [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
3092 [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
3093 [RS_COLUMN_MIMO2] = "MIMO2",
3094 [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
3095 };
3096
3097 static const char * const rate_name[] = {
3098 [IWL_RATE_1M_INDEX] = "1M",
3099 [IWL_RATE_2M_INDEX] = "2M",
3100 [IWL_RATE_5M_INDEX] = "5.5M",
3101 [IWL_RATE_11M_INDEX] = "11M",
3102 [IWL_RATE_6M_INDEX] = "6M|MCS0",
3103 [IWL_RATE_9M_INDEX] = "9M",
3104 [IWL_RATE_12M_INDEX] = "12M|MCS1",
3105 [IWL_RATE_18M_INDEX] = "18M|MCS2",
3106 [IWL_RATE_24M_INDEX] = "24M|MCS3",
3107 [IWL_RATE_36M_INDEX] = "36M|MCS4",
3108 [IWL_RATE_48M_INDEX] = "48M|MCS5",
3109 [IWL_RATE_54M_INDEX] = "54M|MCS6",
3110 [IWL_RATE_MCS_7_INDEX] = "MCS7",
3111 [IWL_RATE_MCS_8_INDEX] = "MCS8",
3112 [IWL_RATE_MCS_9_INDEX] = "MCS9",
3113 };
3114
3115 char *buff, *pos, *endpos;
3116 int col, rate;
3117 ssize_t ret;
3118 struct iwl_lq_sta *lq_sta = file->private_data;
3119 struct rs_rate_stats *stats;
3120 static const size_t bufsz = 1024;
3121
3122 buff = kmalloc(bufsz, GFP_KERNEL);
3123 if (!buff)
3124 return -ENOMEM;
3125
3126 pos = buff;
3127 endpos = pos + bufsz;
3128
3129 pos += scnprintf(pos, endpos - pos, "COLUMN,");
3130 for (rate = 0; rate < IWL_RATE_COUNT; rate++)
3131 pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
3132 pos += scnprintf(pos, endpos - pos, "\n");
3133
3134 for (col = 0; col < RS_COLUMN_COUNT; col++) {
3135 pos += scnprintf(pos, endpos - pos,
3136 "%s,", column_name[col]);
3137
3138 for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
3139 stats = &(lq_sta->tx_stats[col][rate]);
3140 pos += scnprintf(pos, endpos - pos,
3141 "%llu/%llu,",
3142 stats->success,
3143 stats->total);
3144 }
3145 pos += scnprintf(pos, endpos - pos, "\n");
3146 }
3147
3148 ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
3149 kfree(buff);
3150 return ret;
3151 }
3152
3153 static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
3154 const char __user *user_buf,
3155 size_t count, loff_t *ppos)
3156 {
3157 struct iwl_lq_sta *lq_sta = file->private_data;
3158 memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
3159
3160 return count;
3161 }
3162
3163 static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
3164 .read = rs_sta_dbgfs_drv_tx_stats_read,
3165 .write = rs_sta_dbgfs_drv_tx_stats_write,
3166 .open = simple_open,
3167 .llseek = default_llseek,
3168 };
3169
3170 static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
3171 {
3172 struct iwl_lq_sta *lq_sta = mvm_sta;
3173 lq_sta->rs_sta_dbgfs_scale_table_file =
3174 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
3175 lq_sta, &rs_sta_dbgfs_scale_table_ops);
3176 lq_sta->rs_sta_dbgfs_stats_table_file =
3177 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
3178 lq_sta, &rs_sta_dbgfs_stats_table_ops);
3179 lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
3180 debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
3181 lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
3182 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
3183 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
3184 &lq_sta->tx_agg_tid_en);
3185 lq_sta->rs_sta_dbgfs_reduced_txp_file =
3186 debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
3187 &lq_sta->dbg_fixed_txp_reduction);
3188 }
3189
3190 static void rs_remove_debugfs(void *mvm, void *mvm_sta)
3191 {
3192 struct iwl_lq_sta *lq_sta = mvm_sta;
3193 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
3194 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
3195 debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
3196 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
3197 debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
3198 }
3199 #endif
3200
3201 /*
3202 * Initialization of rate scaling information is done by the driver after
3203 * the station is added. Since mac80211 calls this function before a
3204 * station is added, we ignore it.
3205 */
3206 static void rs_rate_init_stub(void *mvm_r,
3207 struct ieee80211_supported_band *sband,
3208 struct cfg80211_chan_def *chandef,
3209 struct ieee80211_sta *sta, void *mvm_sta)
3210 {
3211 }
3212
3213 static const struct rate_control_ops rs_mvm_ops = {
3214 .name = RS_NAME,
3215 .tx_status = rs_tx_status,
3216 .get_rate = rs_get_rate,
3217 .rate_init = rs_rate_init_stub,
3218 .alloc = rs_alloc,
3219 .free = rs_free,
3220 .alloc_sta = rs_alloc_sta,
3221 .free_sta = rs_free_sta,
3222 .rate_update = rs_rate_update,
3223 #ifdef CONFIG_MAC80211_DEBUGFS
3224 .add_sta_debugfs = rs_add_debugfs,
3225 .remove_sta_debugfs = rs_remove_debugfs,
3226 #endif
3227 };
3228
3229 int iwl_mvm_rate_control_register(void)
3230 {
3231 return ieee80211_rate_control_register(&rs_mvm_ops);
3232 }
3233
3234 void iwl_mvm_rate_control_unregister(void)
3235 {
3236 ieee80211_rate_control_unregister(&rs_mvm_ops);
3237 }
3238
3239 /**
3240 * iwl_mvm_tx_protection - Gets the LQ command, changes it to enable/disable
3241 * Tx protection according to this request and previous requests,
3242 * and sends the LQ command.
3243 * @mvmsta: The station
3244 * @enable: Enable Tx protection?
3245 */
3246 int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
3247 bool enable)
3248 {
3249 struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
3250
3251 lockdep_assert_held(&mvm->mutex);
3252
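/*
 * tx_protection acts as a use count: RTS protection is switched on by
 * the first enable request and switched off only when the last
 * requester drops it.
 */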
3253 if (enable) {
3254 if (mvmsta->tx_protection == 0)
3255 lq->flags |= LQ_FLAG_USE_RTS_MSK;
3256 mvmsta->tx_protection++;
3257 } else {
3258 mvmsta->tx_protection--;
3259 if (mvmsta->tx_protection == 0)
3260 lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
3261 }
3262
3263 return iwl_mvm_send_lq_cmd(mvm, lq, false);
3264 }