// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"
#include "fw/img.h"

/*
 * Returns 0 even if the command failed while RFKILL is asserted,
 * unless CMD_WANT_SKB is set in cmd->flags.
 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
	int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex, this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_send_cmd(mvm->trans, cmd);

	/*
	 * If the caller wants the SKB, then don't hide any problems, the
	 * caller might access the response buffer which will be NULL if
	 * the command failed.
	 */
	if (cmd->flags & CMD_WANT_SKB)
		return ret;

	/*
	 * Silently ignore failures if RFKILL is asserted or
	 * we are in the suspend/resume process.
	 */
	if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
		return 0;
	return ret;
}
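
/*
 * Example usage of iwl_mvm_send_cmd() above (an illustrative sketch,
 * not taken from a real caller; ECHO_CMD is a hypothetical placeholder
 * for any command ID):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = ECHO_CMD,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_mvm_send_cmd(mvm, &hcmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&hcmd);
 *
 * With CMD_WANT_SKB set, an RFKILL failure is reported to the caller
 * (who must then not touch hcmd.resp_pkt); without it, such failures
 * are silently converted to success above.
 */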

int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}

/*
 * We assume that the caller has set the status to the success value
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
			    u32 *status)
{
	struct iwl_rx_packet *pkt;
	struct iwl_cmd_response *resp;
	int ret, resp_len;

	lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Only synchronous commands can wait for status; we use
	 * CMD_WANT_SKB internally here, so the caller must not set it.
	 */
	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
		      "cmd flags %x", cmd->flags))
		return -EINVAL;

	cmd->flags |= CMD_WANT_SKB;

	ret = iwl_trans_send_cmd(mvm->trans, cmd);
	if (ret == -ERFKILL) {
		/*
		 * The command failed because of RFKILL, don't update
		 * the status, leave it as success and return 0.
		 */
		return 0;
	} else if (ret) {
		return ret;
	}

	pkt = cmd->resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		ret = -EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32_to_cpu(resp->status);
 out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

/*
 * We assume that the caller has set the status to the success value
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}
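
/*
 * Example usage of iwl_mvm_send_cmd_pdu_status() (an illustrative
 * sketch; ADD_STA and ADD_STA_SUCCESS are used as a representative
 * command and success value):
 *
 *	u32 status = ADD_STA_SUCCESS;
 *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					      sizeof(cmd), &cmd, &status);
 *
 * If the command is dropped due to RFKILL, status keeps the preset
 * success value and 0 is returned, which is why the caller must
 * initialize it.
 */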

int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
					  enum nl80211_band band)
{
	int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	bool is_LB = band == NL80211_BAND_2GHZ;

	if (format == RATE_MCS_LEGACY_OFDM_MSK)
		return is_LB ? rate + IWL_FIRST_OFDM_RATE :
			rate;

	/* CCK is not allowed in HB */
	return is_LB ? rate : -1;
}
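
/*
 * Worked example for iwl_mvm_legacy_hw_idx_to_mac80211_idx() (the
 * numbers are illustrative, derived from the logic above): an OFDM
 * hardware index of 0 maps to mac80211 index IWL_FIRST_OFDM_RATE on
 * 2.4 GHz, where the rate table starts with the CCK rates, but maps
 * to index 0 on 5 GHz, where OFDM rates start the table. A CCK index
 * on 5 GHz is rejected with -1.
 */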

int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum nl80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band != NL80211_BAND_2GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (iwl_fw_rate_idx_to_plcp(idx) == rate)
			return idx - band_offset;

	return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
	if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
		/* In the new rate format, legacy rates are indexed:
		 * 0 - 3 for CCK and 0 - 7 for OFDM.
		 */
		return (rate_idx >= IWL_FIRST_OFDM_RATE ?
			rate_idx - IWL_FIRST_OFDM_RATE :
			rate_idx);

	return iwl_fw_rate_idx_to_plcp(rate_idx);
}

u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
{
	static const u8 mac80211_ac_to_ucode_ac[] = {
		AC_VO,
		AC_VI,
		AC_BE,
		AC_BK
	};

	return mac80211_ac_to_ucode_ac[ac];
}

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
		le64_to_cpu(err_resp->timestamp));
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}
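
/*
 * Worked example for first_antenna() (illustrative, assuming the usual
 * ANT_A == BIT(0), ANT_B == BIT(1), ANT_C == BIT(2) encoding; only the
 * ANT_A value is actually checked by the BUILD_BUG_ON above):
 * first_antenna(ANT_B | ANT_C) computes ffs(0x6) == 2 and therefore
 * returns BIT(1) == ANT_B, the lowest antenna set in the mask.
 */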

#define MAX_ANT_NUM 2
/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, the caller must use BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < MAX_ANT_NUM; i++) {
		ind = (ind + 1) % MAX_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}
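
/*
 * Example usage of iwl_mvm_next_antenna() (an illustrative sketch;
 * last_ant_idx and ant_mask are hypothetical caller-side variables):
 *
 *	last_ant_idx = iwl_mvm_next_antenna(mvm,
 *					    iwl_mvm_get_valid_tx_ant(mvm),
 *					    last_ant_idx);
 *	ant_mask = BIT(last_ant_idx);
 *
 * With both antennas valid (0x3), repeated calls alternate between
 * index 0 and index 1.
 */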

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @mvm: Driver data.
 * @lq: Link quality command to send.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set, and we call a callback
 * in that case to clear the state indicating that station creation is
 * in progress.
 *
 * Returns: an error code indicating success or failure
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
		    iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}

/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @mvm: Driver data.
 * @vif: Pointer to the ieee80211_vif structure
 * @req_type: The part of the driver that requested the change.
 * @smps_request: The request to change the SMPS mode.
 * @link_id: for MLO link_id, otherwise 0 (deflink)
 *
 * Get a request to change the SMPS mode,
 * and change it according to all other requests in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request,
			 unsigned int link_id)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (WARN_ON_ONCE(!mvmvif->link[link_id]))
		return;

	mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->link[link_id]->smps_requests[i] ==
		    IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->link[link_id]->smps_requests[i] ==
		    IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	/* SMPS is disabled in eSR */
	if (mvmvif->esr_active)
		smps_mode = IEEE80211_SMPS_OFF;

	ieee80211_request_smps(vif, link_id, smps_mode);
}
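
/*
 * Example usage of iwl_mvm_update_smps() (an illustrative sketch;
 * IWL_MVM_SMPS_REQ_BT_COEX is used as a representative requester):
 *
 *	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
 *			    IEEE80211_SMPS_DYNAMIC, 0);
 *
 * The per-requester votes are then reconciled above: any STATIC vote
 * wins, otherwise any DYNAMIC vote wins, otherwise AUTOMATIC is used.
 */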

void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 enum iwl_mvm_smps_type_request req_type,
					 enum ieee80211_smps_mode smps_request)
{
	struct ieee80211_bss_conf *link_conf;
	unsigned int link_id;

	rcu_read_lock();
	for_each_vif_active_link(vif, link_conf, link_id)
		iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
				    link_id);
	rcu_read_unlock();
}

static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
				    struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);

	return true;
}

#define PERIODIC_STAT_RATE 5

int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
{
	u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
	u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
			     IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
	struct iwl_system_statistics_cmd system_cmd = {
		.cfg_mask = cpu_to_le32(flags),
		.config_time_sec = cpu_to_le32(enable ?
					       PERIODIC_STAT_RATE : 0),
		.type_id_mask = cpu_to_le32(type),
	};

	return iwl_mvm_send_cmd_pdu(mvm,
				    WIDE_ID(SYSTEM_GROUP,
					    SYSTEM_STATISTICS_CMD),
				    0, sizeof(system_cmd), &system_cmd);
}

static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
					     u8 cmd_ver)
{
	struct iwl_system_statistics_cmd system_cmd = {
		.cfg_mask = clear ?
			cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
			cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
				    IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
		.type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
					    IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
		.len[0] = sizeof(system_cmd),
		.data[0] = &system_cmd,
	};
	struct iwl_notification_wait stats_wait;
	static const u16 stats_complete[] = {
		WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
	};
	int ret;

	if (cmd_ver != 1) {
		IWL_FW_CHECK_FAILED(mvm,
				    "Invalid system statistics command version:%d\n",
				    cmd_ver);
		return -EOPNOTSUPP;
	}

	iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
				   stats_complete, ARRAY_SIZE(stats_complete),
				   NULL, NULL);

	mvm->statistics_clear = clear;
	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		iwl_remove_notification(&mvm->notif_wait, &stats_wait);
		return ret;
	}

	/* 500ms for the OPERATIONAL, PART1 and END notifications should
	 * be enough for the FW to collect data from all LMACs and send
	 * STATISTICS_NOTIFICATION to the host
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
	if (ret)
		return ret;

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return ret;
}

int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};

	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
	};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
					   WIDE_ID(SYSTEM_GROUP,
						   SYSTEM_STATISTICS_CMD),
					   IWL_FW_CMD_VER_UNKNOWN);
	int ret;

	/*
	 * Don't request statistics during restart: they won't contain any
	 * useful information right after a restart, nor is clearing needed.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		return 0;

	if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
		return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);

	/* From version 15 of STATISTICS_NOTIFICATION, the reply to
	 * STATISTICS_CMD is empty and the data arrives in a
	 * STATISTICS_NOTIFICATION instead
	 */
	if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
				    STATISTICS_NOTIFICATION, 0) < 15) {
		cmd.flags = CMD_WANT_SKB;

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret)
			return ret;

		iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
		iwl_free_resp(&cmd);
	} else {
		struct iwl_notification_wait stats_wait;
		static const u16 stats_complete[] = {
			STATISTICS_NOTIFICATION,
		};

		iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
					   stats_complete, ARRAY_SIZE(stats_complete),
					   iwl_wait_stats_complete, NULL);

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret) {
			iwl_remove_notification(&mvm->notif_wait, &stats_wait);
			return ret;
		}

		/* 200ms should be enough for the FW to collect data from
		 * all LMACs and send STATISTICS_NOTIFICATION to the host
		 */
		ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
		if (ret)
			return ret;
	}

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

struct iwl_mvm_diversity_iter_data {
	struct iwl_mvm_phy_ctxt *ctxt;
	bool result;
};

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_diversity_iter_data *data = _data;
	int i, link_id;

	for_each_mvm_vif_valid_link(mvmvif, link_id) {
		struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];

		if (link_info->phy_ctxt != data->ctxt)
			continue;

		for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
			if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
			    link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
				data->result = false;
				break;
			}
		}
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
				  struct iwl_mvm_phy_ctxt *ctxt)
{
	struct iwl_mvm_diversity_iter_data data = {
		.ctxt = ctxt,
		.result = true,
	};

	lockdep_assert_held(&mvm->mutex);

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		return false;

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &data);

	return data.result;
}

void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
				  bool low_latency, u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
		return;

	if (low_latency) {
		/* currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "Failed to send low latency command\n");
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool low_latency,
			       enum iwl_mvm_low_latency_cause cause)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;
	bool prev;

	lockdep_assert_held(&mvm->mutex);

	prev = iwl_mvm_vif_low_latency(mvmvif);
	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);

	low_latency = iwl_mvm_vif_low_latency(mvmvif);

	if (low_latency == prev)
		return 0;

	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

struct iwl_mvm_low_latency_iter {
	bool result;
	bool result_per_band[NUM_NL80211_BANDS];
};

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm_low_latency_iter *result = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	enum nl80211_band band;

	if (iwl_mvm_vif_low_latency(mvmvif)) {
		result->result = true;

		if (!mvmvif->deflink.phy_ctxt)
			return;

		band = mvmvif->deflink.phy_ctxt->channel->band;
		result->result_per_band[band] = true;
	}
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result;
}

bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result_per_band[band];
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

struct iwl_bss_find_iter_data {
	struct ieee80211_vif *vif;
	u32 macid;
};

static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_bss_find_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->id == data->macid)
		data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
{
	struct iwl_bss_find_iter_data data = {
		.macid = macid,
	};

	lockdep_assert_held(&mvm->mutex);

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_bss_find_iface_iterator, &data);

	return data.vif;
}

struct iwl_sta_iter_data {
	bool assoc;
};

static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_sta_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->cfg.assoc)
		data->assoc = true;
}

bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
{
	struct iwl_sta_iter_data data = {
		.assoc = false,
	};

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_sta_iface_iterator,
						   &data);
	return data.assoc;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout = cmd_q ?
		IWL_DEF_WD_TIMEOUT :
		mvm->trans->trans_cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
		/*
		 * We can't know when the station is asleep or awake, so we
		 * must disable the queue hang detection.
		 */
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
		    vif && vif->type == NL80211_IFTYPE_AP)
			return IWL_WATCHDOG_DISABLED;
		return default_timeout;
	}

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	case NL80211_IFTYPE_MONITOR:
		return default_timeout;
	default:
		WARN_ON(1);
		return mvm->trans->trans_cfg->base_params->wd_timeout;
	}
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_MLME);
	if (!trig)
		goto out;

	trig_mlme = (void *)trig->data;

	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}

void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_sta *sta,
					  u16 tid)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"Frame from %pM timed out, tid %d",
				sta->addr, tid);
}

u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
{
	if (!elapsed)
		return 0;

	return (100 * airtime / elapsed) / USEC_PER_MSEC;
}
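
/*
 * Worked example for iwl_mvm_tcm_load_percentage() (illustrative
 * numbers): airtime is in usec and elapsed in msec, so 500,000 usec
 * of airtime over 1,000 msec gives (100 * 500000 / 1000) / 1000 == 50,
 * i.e. a 50% load.
 */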

static enum iwl_mvm_traffic_load
iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
{
	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);

	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
		return IWL_MVM_TRAFFIC_HIGH;
	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
		return IWL_MVM_TRAFFIC_MEDIUM;

	return IWL_MVM_TRAFFIC_LOW;
}

static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;

	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
		return;

	low_latency = mvm->tcm.result.low_latency[mvmvif->id];

	if (!mvm->tcm.result.change[mvmvif->id] &&
	    prev == low_latency) {
		iwl_mvm_update_quotas(mvm, false, NULL);
		return;
	}

	if (prev != low_latency) {
		/* this sends traffic load and updates quota as well */
		iwl_mvm_update_low_latency(mvm, vif, low_latency,
					   LOW_LATENCY_TRAFFIC);
	} else {
		iwl_mvm_update_quotas(mvm, false, NULL);
	}
}

static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
	mutex_lock(&mvm->mutex);

	ieee80211_iterate_active_interfaces(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_tcm_iter, mvm);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		iwl_mvm_config_scan(mvm);

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm;
	struct iwl_mvm_vif *mvmvif;
	struct ieee80211_vif *vif;

	mvmvif = container_of(wk, struct iwl_mvm_vif,
			      uapsd_nonagg_detected_wk.work);
	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
	mvm = mvmvif->mvm;

	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
		return;

	/* remember that this AP is broken */
	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
	       vif->bss_conf.bssid, ETH_ALEN);
	mvm->uapsd_noagg_bssid_write_idx++;
	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
		mvm->uapsd_noagg_bssid_write_idx = 0;

	iwl_mvm_connection_loss(mvm, vif,
				"AP isn't using AMPDU with uAPSD enabled");
}

static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
		return;

	if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
		return;

	mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
	IWL_INFO(mvm,
		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
			      15 * HZ);
}

static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
						 unsigned int elapsed,
						 int mac)
{
	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
	u64 tpt;
	unsigned long rate;
	struct ieee80211_vif *vif;

	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);

	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
		return;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		tpt = 8 * bytes; /* kbps */
		do_div(tpt, elapsed);
		rate *= 1000; /* kbps */
		if (tpt < 22 * rate / 100)
			return;
	} else {
		/*
		 * the rate here is actually the threshold, in 100Kbps units,
		 * so do the needed conversion from bytes to 100Kbps:
		 * 100kb = bits / (100 * 1000),
		 * 100kbps = 100kb / (msecs / 1000) ==
		 * (bits / (100 * 1000)) / (msecs / 1000) ==
		 * bits / (100 * msecs)
		 */
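		/*
		 * Worked example (illustrative numbers): bytes = 1,250,000
		 * over elapsed = 5,000 msecs gives
		 * tpt = (8 * 1250000) / (5000 * 100) = 20 units of 100Kbps,
		 * i.e. 2 Mbps, which is then compared against the threshold.
		 */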
		tpt = (8 * bytes);
		do_div(tpt, elapsed * 100);
		if (tpt < rate)
			return;
	}

	rcu_read_lock();
	vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
	if (vif)
		iwl_mvm_uapsd_agg_disconnect(mvm, vif);
	rcu_read_unlock();
}

static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 *band = _data;

	if (!mvmvif->deflink.phy_ctxt)
		return;

	band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
}

static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
					    unsigned long ts,
					    bool handle_uapsd)
{
	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
	unsigned int uapsd_elapsed =
		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
	u32 total_airtime = 0;
	u32 band_airtime[NUM_NL80211_BANDS] = {0};
	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
	int ac, mac, i;
	bool low_latency = false;
	enum iwl_mvm_traffic_load load, band_load;
	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);

	if (handle_ll)
		mvm->tcm.ll_ts = ts;
	if (handle_uapsd)
		mvm->tcm.uapsd_nonagg_ts = ts;

	mvm->tcm.result.elapsed = elapsed;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_tcm_iterator,
						   &band);

	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
		u32 vo_vi_pkts = 0;
		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;

		total_airtime += airtime;
		band_airtime[band[mac]] += airtime;

		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
		mvm->tcm.result.load[mac] = load;
		mvm->tcm.result.airtime[mac] = airtime;

		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
			vo_vi_pkts += mdata->rx.pkts[ac] +
				      mdata->tx.pkts[ac];

		/* enable immediately with enough packets but defer disabling */
		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
			mvm->tcm.result.low_latency[mac] = true;
		else if (handle_ll)
			mvm->tcm.result.low_latency[mac] = false;

		if (handle_ll) {
			/* clear old data */
			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		}
		low_latency |= mvm->tcm.result.low_latency[mac];

		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
							     mac);
		/* clear old data */
		if (handle_uapsd)
			mdata->uapsd_nonagg_detect.rx_bytes = 0;
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
	}

	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
	mvm->tcm.result.global_load = load;

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
		mvm->tcm.result.band_load[i] = band_load;
	}

	/*
	 * If the current load isn't low we need to force re-evaluation
	 * in the TCM period, so that we can return to low load if there
	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
	 * triggered by traffic).
	 */
	if (load != IWL_MVM_TRAFFIC_LOW)
		return MVM_TCM_PERIOD;
	/*
	 * If low-latency is active we need to force re-evaluation after
	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
	 * when there's no traffic at all.
	 */
	if (low_latency)
		return MVM_LL_PERIOD;
	/*
	 * Otherwise, we don't need to run the work struct because we're
	 * in the default "idle" state - traffic indication is low (which
	 * also covers the "no traffic" case) and low-latency is disabled
	 * so there's no state that may need to be disabled when there's
	 * no traffic at all.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever one of the
	 * two timeouts expire (if there's traffic at all.)
	 */
	return 0;
}

void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
{
	unsigned long ts = jiffies;
	bool handle_uapsd =
		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));

	spin_lock(&mvm->tcm.lock);
	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		spin_unlock(&mvm->tcm.lock);
		return;
	}
	spin_unlock(&mvm->tcm.lock);

	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
		mutex_lock(&mvm->mutex);
		if (iwl_mvm_request_statistics(mvm, true))
			handle_uapsd = false;
		mutex_unlock(&mvm->mutex);
	}

	spin_lock(&mvm->tcm.lock);
	/* re-check if somebody else won the recheck race */
	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		/* calculate statistics */
		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
								  handle_uapsd);

		/* the memset needs to be visible before the timestamp */
		smp_mb();
		mvm->tcm.ts = ts;
		if (work_delay)
			schedule_delayed_work(&mvm->tcm.work, work_delay);
	}
	spin_unlock(&mvm->tcm.lock);

	iwl_mvm_tcm_results(mvm);
}

void iwl_mvm_tcm_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
					   tcm.work);

	iwl_mvm_recalc_tcm(mvm);
}

void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
{
	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.paused = true;
	spin_unlock_bh(&mvm->tcm.lock);
	if (with_cancel)
		cancel_delayed_work_sync(&mvm->tcm.work);
}

void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
{
	int mac;
	bool low_latency = false;

	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));

		if (mvm->tcm.result.low_latency[mac])
			low_latency = true;
	}
	/* The TCM data needs to be reset before "paused" flag changes */
	smp_mb();
	mvm->tcm.paused = false;

	/*
	 * if the current load is not low or low latency is active, force
	 * re-evaluation to cover the case of no traffic.
	 */
	if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
		schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
	else if (low_latency)
		schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);

	spin_unlock_bh(&mvm->tcm.lock);
}

void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
}

void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}

u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
{
	u32 reg_addr = DEVICE_SYSTEM_TIME_REG;

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
	    mvm->trans->cfg->gp2_reg_addr)
		reg_addr = mvm->trans->cfg->gp2_reg_addr;

	return iwl_read_prph(mvm->trans, reg_addr);
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
			   u32 *gp2, u64 *boottime, ktime_t *realtime)
{
	bool ps_disabled;

	lockdep_assert_held(&mvm->mutex);

	/* Disable power save when reading GP2 */
	ps_disabled = mvm->ps_disabled;
	if (!ps_disabled) {
		mvm->ps_disabled = true;
		iwl_mvm_power_update_device(mvm);
	}

	*gp2 = iwl_mvm_get_systime(mvm);

	if (clock_type == CLOCK_BOOTTIME && boottime)
		*boottime = ktime_get_boottime_ns();
	else if (clock_type == CLOCK_REALTIME && realtime)
		*realtime = ktime_get_real();

	if (!ps_disabled) {
		mvm->ps_disabled = ps_disabled;
		iwl_mvm_power_update_device(mvm);
	}
}
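
/*
 * Example usage of iwl_mvm_get_sync_time() (an illustrative sketch;
 * the caller must hold mvm->mutex, as asserted above):
 *
 *	u32 gp2;
 *	u64 boottime;
 *
 *	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &gp2, &boottime, NULL);
 *
 * This pairs the device GP2 timestamp with a host clock reading taken
 * while power save is momentarily disabled.
 */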

/* Find if at least two links from different vifs use the same channel
 * FIXME: consider having a refcount array in struct iwl_mvm_vif for
 * used phy_ctxt ids.
 */
bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
				     struct iwl_mvm_vif *vif2)
{
	unsigned int i, j;

	for_each_mvm_vif_valid_link(vif1, i) {
		for_each_mvm_vif_valid_link(vif2, j) {
			if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
				return true;
		}
	}

	return false;
}

bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
{
	unsigned int i;

	/* FIXME: can it fail when phy_ctxt is assigned? */
	for_each_mvm_vif_valid_link(mvmvif, i) {
		if (mvmvif->link[i]->phy_ctxt &&
		    mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
			return true;
	}

	return false;
}