1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <linux/etherdevice.h>
8 #include <linux/ip.h>
9 #include <linux/fs.h>
10 #include <net/cfg80211.h>
11 #include <net/ipv6.h>
12 #include <net/tcp.h>
13 #include <net/addrconf.h>
14 #include "iwl-modparams.h"
15 #include "fw-api.h"
16 #include "mvm.h"
17 #include "fw/img.h"
18
iwl_mvm_set_rekey_data(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct cfg80211_gtk_rekey_data * data)19 void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
20 struct ieee80211_vif *vif,
21 struct cfg80211_gtk_rekey_data *data)
22 {
23 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
24 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
25
26 mutex_lock(&mvm->mutex);
27
28 mvmvif->rekey_data.kek_len = data->kek_len;
29 mvmvif->rekey_data.kck_len = data->kck_len;
30 memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len);
31 memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
32 mvmvif->rekey_data.akm = data->akm & 0xFF;
33 mvmvif->rekey_data.replay_ctr =
34 cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
35 mvmvif->rekey_data.valid = true;
36
37 mutex_unlock(&mvm->mutex);
38 }
39
40 #if IS_ENABLED(CONFIG_IPV6)
iwl_mvm_ipv6_addr_change(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct inet6_dev * idev)41 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
42 struct ieee80211_vif *vif,
43 struct inet6_dev *idev)
44 {
45 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
46 struct inet6_ifaddr *ifa;
47 int idx = 0;
48
49 memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
50
51 read_lock_bh(&idev->lock);
52 list_for_each_entry(ifa, &idev->addr_list, if_list) {
53 mvmvif->target_ipv6_addrs[idx] = ifa->addr;
54 if (ifa->flags & IFA_F_TENTATIVE)
55 __set_bit(idx, mvmvif->tentative_addrs);
56 idx++;
57 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
58 break;
59 }
60 read_unlock_bh(&idev->lock);
61
62 mvmvif->num_target_ipv6_addrs = idx;
63 }
64 #endif
65
iwl_mvm_set_default_unicast_key(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int idx)66 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
67 struct ieee80211_vif *vif, int idx)
68 {
69 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
70
71 mvmvif->tx_key_idx = idx;
72 }
73
/* Copy a TKIP phase-1 key into the little-endian layout the fw expects. */
static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
{
	int n = IWL_P1K_SIZE;

	while (n--)
		out[n] = cpu_to_le16(p1k[n]);
}
81
iwl_mvm_find_max_pn(struct ieee80211_key_conf * key,struct iwl_mvm_key_pn * ptk_pn,struct ieee80211_key_seq * seq,int tid,int queues)82 static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
83 struct iwl_mvm_key_pn *ptk_pn,
84 struct ieee80211_key_seq *seq,
85 int tid, int queues)
86 {
87 const u8 *ret = seq->ccmp.pn;
88 int i;
89
90 /* get the PN from mac80211, used on the default queue */
91 ieee80211_get_key_rx_seq(key, tid, seq);
92
93 /* and use the internal data for the other queues */
94 for (i = 1; i < queues; i++) {
95 const u8 *tmp = ptk_pn->q[i].pn[tid];
96
97 if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
98 ret = tmp;
99 }
100
101 return ret;
102 }
103
/* Scratch state shared by the WoWLAN key-programming iterator callback. */
struct wowlan_key_data {
	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;	/* RSC/TSC counters to upload */
	struct iwl_wowlan_tkip_params_cmd *tkip;	/* TKIP P1K/MIC material */
	struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;	/* rekey/cipher info */
	/* error: any key failed; use_*: which commands must be sent;
	 * configure_keys: upload keys to the (non-unified) D3 fw image */
	bool error, use_rsc_tsc, use_tkip, configure_keys;
	int wep_key_idx;	/* next hw key offset for non-default WEP keys */
};
111
/*
 * ieee80211_iter_keys() callback: translate each mac80211 key into the
 * firmware's WoWLAN key material. Collects TSC/RSC counters, TKIP
 * P1K/MIC data and GTK/IGTK cipher info into the shared wowlan_key_data,
 * and -- when data->configure_keys is set (non-unified fw images) --
 * uploads the key to the D3 firmware under mvm->mutex. Errors are
 * reported through data->error rather than a return value.
 */
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct wowlan_key_data *data = _data;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwl_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWL_P1K_SIZE];
	int ret, i;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
		struct {
			struct iwl_mvm_wep_key_cmd wep_key_cmd;
			struct iwl_mvm_wep_key wep_key;
		} __packed wkc = {
			.wep_key_cmd.mac_id_n_color =
				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								mvmvif->color)),
			.wep_key_cmd.num_keys = 1,
			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
			.wep_key.key_index = key->keyidx,
			.wep_key.key_size = key->keylen,
		};

		/*
		 * This will fail -- the key functions don't set support
		 * pairwise WEP keys. However, that's better than silently
		 * failing WoWLAN. Or maybe not?
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			break;

		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
		if (key->keyidx == mvmvif->tx_key_idx) {
			/* TX key must be at offset 0 */
			wkc.wep_key.key_offset = 0;
		} else {
			/* others start at 1 */
			data->wep_key_idx++;
			wkc.wep_key.key_offset = data->wep_key_idx;
		}

		if (data->configure_keys) {
			mutex_lock(&mvm->mutex);
			ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
						   sizeof(wkc), &wkc);
			data->error = ret != 0;

			mvm->ptk_ivlen = key->iv_len;
			mvm->ptk_icvlen = key->icv_len;
			mvm->gtk_ivlen = key->iv_len;
			mvm->gtk_icvlen = key->icv_len;
			mutex_unlock(&mvm->mutex);
		}

		/* don't upload key again */
		return;
	}
	default:
		data->error = true;
		return;
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
		return;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
		/*
		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
		 * but we also shouldn't abort suspend due to that. It does have
		 * support for the IGTK key renewal, but doesn't really use the
		 * IGTK for anything. This means we could spuriously wake up or
		 * be deauthenticated, but that was considered acceptable.
		 */
		return;
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			u64 pn64;

			tkip_sc =
			   data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc =
				&data->rsc_tsc->params.all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			pn64 = atomic64_read(&key->tx_pn);
			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));

			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
						  p1k);
			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
			  data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
			data->kek_kck_cmd->gtk_cipher =
				cpu_to_le32(STA_KEY_FLG_TKIP);
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWL_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		/* derive the RX phase-1 keys for the current and next iv32 */
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32 + 1, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWL_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (sta) {
			u64 pn64;

			aes_sc =
			   data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc =
				&data->rsc_tsc->params.all_tsc_rsc.aes.tsc;

			pn64 = atomic64_read(&key->tx_pn);
			aes_tx_sc->pn = cpu_to_le64(pn64);
		} else {
			aes_sc =
			   data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc;
			data->kek_kck_cmd->gtk_cipher =
				key->cipher == WLAN_CIPHER_SUITE_CCMP ?
				cpu_to_le32(STA_KEY_FLG_CCM) :
				cpu_to_le32(STA_KEY_FLG_GCMP);
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211/our RX code use TID 0 for checking the PN.
		 */
		if (sta && iwl_mvm_has_new_rx_api(mvm)) {
			struct iwl_mvm_sta *mvmsta;
			struct iwl_mvm_key_pn *ptk_pn;
			const u8 *pn;

			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			rcu_read_lock();
			ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
			if (WARN_ON(!ptk_pn)) {
				rcu_read_unlock();
				break;
			}

			/* per-TID: pick the max PN across all RX queues */
			for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
				pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
						mvm->trans->num_rx_queues);
				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
							   ((u64)pn[4] << 8) |
							   ((u64)pn[3] << 16) |
							   ((u64)pn[2] << 24) |
							   ((u64)pn[1] << 32) |
							   ((u64)pn[0] << 40));
			}

			rcu_read_unlock();
		} else {
			for (i = 0; i < IWL_NUM_RSC; i++) {
				u8 *pn = seq.ccmp.pn;

				ieee80211_get_key_rx_seq(key, i, &seq);
				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
							   ((u64)pn[4] << 8) |
							   ((u64)pn[3] << 16) |
							   ((u64)pn[2] << 24) |
							   ((u64)pn[1] << 32) |
							   ((u64)pn[0] << 40));
			}
		}
		data->use_rsc_tsc = true;
		break;
	}

	IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher);

	if (data->configure_keys) {
		mutex_lock(&mvm->mutex);
		/*
		 * The D3 firmware hardcodes the key offset 0 as the key it
		 * uses to transmit packets to the AP, i.e. the PTK.
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			mvm->ptk_ivlen = key->iv_len;
			mvm->ptk_icvlen = key->icv_len;
			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
		} else {
			/*
			 * firmware only supports TSC/RSC for a single key,
			 * so if there are multiple keep overwriting them
			 * with new ones -- this relies on mac80211 doing
			 * list_add_tail().
			 */
			mvm->gtk_ivlen = key->iv_len;
			mvm->gtk_icvlen = key->icv_len;
			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
		}
		mutex_unlock(&mvm->mutex);
		data->error = ret != 0;
	}
}
354
iwl_mvm_send_patterns_v1(struct iwl_mvm * mvm,struct cfg80211_wowlan * wowlan)355 static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
356 struct cfg80211_wowlan *wowlan)
357 {
358 struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
359 struct iwl_host_cmd cmd = {
360 .id = WOWLAN_PATTERNS,
361 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
362 };
363 int i, err;
364
365 if (!wowlan->n_patterns)
366 return 0;
367
368 cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
369
370 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
371 if (!pattern_cmd)
372 return -ENOMEM;
373
374 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
375
376 for (i = 0; i < wowlan->n_patterns; i++) {
377 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
378
379 memcpy(&pattern_cmd->patterns[i].mask,
380 wowlan->patterns[i].mask, mask_len);
381 memcpy(&pattern_cmd->patterns[i].pattern,
382 wowlan->patterns[i].pattern,
383 wowlan->patterns[i].pattern_len);
384 pattern_cmd->patterns[i].mask_size = mask_len;
385 pattern_cmd->patterns[i].pattern_size =
386 wowlan->patterns[i].pattern_len;
387 }
388
389 cmd.data[0] = pattern_cmd;
390 err = iwl_mvm_send_cmd(mvm, &cmd);
391 kfree(pattern_cmd);
392 return err;
393 }
394
iwl_mvm_send_patterns(struct iwl_mvm * mvm,struct cfg80211_wowlan * wowlan)395 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
396 struct cfg80211_wowlan *wowlan)
397 {
398 struct iwl_wowlan_patterns_cmd *pattern_cmd;
399 struct iwl_host_cmd cmd = {
400 .id = WOWLAN_PATTERNS,
401 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
402 };
403 int i, err;
404
405 if (!wowlan->n_patterns)
406 return 0;
407
408 cmd.len[0] = sizeof(*pattern_cmd) +
409 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
410
411 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
412 if (!pattern_cmd)
413 return -ENOMEM;
414
415 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
416
417 for (i = 0; i < wowlan->n_patterns; i++) {
418 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
419
420 pattern_cmd->patterns[i].pattern_type =
421 WOWLAN_PATTERN_TYPE_BITMASK;
422
423 memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
424 wowlan->patterns[i].mask, mask_len);
425 memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
426 wowlan->patterns[i].pattern,
427 wowlan->patterns[i].pattern_len);
428 pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
429 pattern_cmd->patterns[i].u.bitmask.pattern_size =
430 wowlan->patterns[i].pattern_len;
431 }
432
433 cmd.data[0] = pattern_cmd;
434 err = iwl_mvm_send_cmd(mvm, &cmd);
435 kfree(pattern_cmd);
436 return err;
437 }
438
/*
 * Manually re-add the PHY context, MAC context, binding, AP station
 * and time quota to the freshly loaded D3 firmware (used only for
 * non-unified images), mirroring the state the runtime firmware had.
 * Returns 0 on success or a negative error code.
 */
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct ieee80211_sta *ap_sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_chanctx_conf *ctx;
	u8 chains_static, chains_dynamic;
	struct cfg80211_chan_def chandef;
	int ret, i;
	struct iwl_binding_cmd_v1 binding_cmd = {};
	struct iwl_time_quota_cmd quota_cmd = {};
	struct iwl_time_quota_data *quota;
	u32 status;

	if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
		return -EINVAL;

	/* add back the PHY */
	if (WARN_ON(!mvmvif->phy_ctxt))
		return -EINVAL;

	rcu_read_lock();
	ctx = rcu_dereference(vif->chanctx_conf);
	if (WARN_ON(!ctx)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	/* copy the channel context parameters while under the RCU lock */
	chandef = ctx->def;
	chains_static = ctx->rx_chains_static;
	chains_dynamic = ctx->rx_chains_dynamic;
	rcu_read_unlock();

	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
				   chains_static, chains_dynamic);
	if (ret)
		return ret;

	/* add back the MAC */
	mvmvif->uploaded = false;

	if (WARN_ON(!vif->bss_conf.assoc))
		return -EINVAL;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		return ret;

	/* add back binding - XXX refactor? */
	binding_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	binding_cmd.phy =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							      mvmvif->color));
	/* mark the unused MAC slots invalid */
	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);

	status = 0;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
					  IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
					  &status);
	if (ret) {
		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
		return ret;
	}

	if (status) {
		IWL_ERR(mvm, "Binding command failed: %u\n", status);
		return -EIO;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
	if (ret)
		return ret;
	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	if (ret)
		return ret;

	/* and some quota */
	quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
	quota->id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
	quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);

	/* mark the remaining binding slots invalid */
	for (i = 1; i < MAX_BINDINGS; i++) {
		quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
		quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
				   iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);

	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");

	return 0;
}
544
iwl_mvm_get_last_nonqos_seq(struct iwl_mvm * mvm,struct ieee80211_vif * vif)545 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
546 struct ieee80211_vif *vif)
547 {
548 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
549 struct iwl_nonqos_seq_query_cmd query_cmd = {
550 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
551 .mac_id_n_color =
552 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
553 mvmvif->color)),
554 };
555 struct iwl_host_cmd cmd = {
556 .id = NON_QOS_TX_COUNTER_CMD,
557 .flags = CMD_WANT_SKB,
558 };
559 int err;
560 u32 size;
561
562 cmd.data[0] = &query_cmd;
563 cmd.len[0] = sizeof(query_cmd);
564
565 err = iwl_mvm_send_cmd(mvm, &cmd);
566 if (err)
567 return err;
568
569 size = iwl_rx_packet_payload_len(cmd.resp_pkt);
570 if (size < sizeof(__le16)) {
571 err = -EINVAL;
572 } else {
573 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
574 /* firmware returns next, not last-used seqno */
575 err = (u16) (err - 0x10);
576 }
577
578 iwl_free_resp(&cmd);
579 return err;
580 }
581
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm * mvm,struct ieee80211_vif * vif)582 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
583 {
584 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
585 struct iwl_nonqos_seq_query_cmd query_cmd = {
586 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
587 .mac_id_n_color =
588 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
589 mvmvif->color)),
590 .value = cpu_to_le16(mvmvif->seqno),
591 };
592
593 /* return if called during restart, not resume from D3 */
594 if (!mvmvif->seqno_valid)
595 return;
596
597 mvmvif->seqno_valid = false;
598
599 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
600 sizeof(query_cmd), &query_cmd))
601 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
602 }
603
iwl_mvm_switch_to_d3(struct iwl_mvm * mvm)604 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
605 {
606 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
607
608 iwl_mvm_stop_device(mvm);
609 /*
610 * Set the HW restart bit -- this is mostly true as we're
611 * going to load new firmware and reprogram that, though
612 * the reprogramming is going to be manual to avoid adding
613 * all the MACs that aren't support.
614 * We don't have to clear up everything though because the
615 * reprogramming is manual. When we resume, we'll actually
616 * go through a proper restart sequence again to switch
617 * back to the runtime firmware image.
618 */
619 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
620
621 /* the fw is reset, so all the keys are cleared */
622 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
623
624 mvm->ptk_ivlen = 0;
625 mvm->ptk_icvlen = 0;
626 mvm->ptk_ivlen = 0;
627 mvm->ptk_icvlen = 0;
628
629 return iwl_mvm_load_d3_fw(mvm);
630 }
631
632 static int
iwl_mvm_get_wowlan_config(struct iwl_mvm * mvm,struct cfg80211_wowlan * wowlan,struct iwl_wowlan_config_cmd * wowlan_config_cmd,struct ieee80211_vif * vif,struct iwl_mvm_vif * mvmvif,struct ieee80211_sta * ap_sta)633 iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
634 struct cfg80211_wowlan *wowlan,
635 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
636 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
637 struct ieee80211_sta *ap_sta)
638 {
639 int ret;
640 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
641
642 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
643
644 wowlan_config_cmd->is_11n_connection =
645 ap_sta->ht_cap.ht_supported;
646 wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
647 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
648
649 /* Query the last used seqno and set it */
650 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
651 if (ret < 0)
652 return ret;
653
654 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
655
656 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
657
658 if (wowlan->disconnect)
659 wowlan_config_cmd->wakeup_filter |=
660 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
661 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
662 if (wowlan->magic_pkt)
663 wowlan_config_cmd->wakeup_filter |=
664 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
665 if (wowlan->gtk_rekey_failure)
666 wowlan_config_cmd->wakeup_filter |=
667 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
668 if (wowlan->eap_identity_req)
669 wowlan_config_cmd->wakeup_filter |=
670 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
671 if (wowlan->four_way_handshake)
672 wowlan_config_cmd->wakeup_filter |=
673 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
674 if (wowlan->n_patterns)
675 wowlan_config_cmd->wakeup_filter |=
676 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
677
678 if (wowlan->rfkill_release)
679 wowlan_config_cmd->wakeup_filter |=
680 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
681
682 if (wowlan->tcp) {
683 /*
684 * Set the "link change" (really "link lost") flag as well
685 * since that implies losing the TCP connection.
686 */
687 wowlan_config_cmd->wakeup_filter |=
688 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
689 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
690 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
691 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
692 }
693
694 if (wowlan->any) {
695 wowlan_config_cmd->wakeup_filter |=
696 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
697 IWL_WOWLAN_WAKEUP_LINK_CHANGE |
698 IWL_WOWLAN_WAKEUP_RX_FRAME |
699 IWL_WOWLAN_WAKEUP_BCN_FILTERING);
700 }
701
702 return 0;
703 }
704
/*
 * Program all WoWLAN key material into the firmware: TSC/RSC counters,
 * TKIP parameters, and -- if userspace provided rekey data -- the
 * KEK/KCK material for offloaded GTK rekeying.
 *
 * Must be called with mvm->mutex *not* held; when keys have to be
 * uploaded (non-unified images) the key iterator takes the mutex
 * itself and all commands it sends are synchronous.
 *
 * Returns 0 on success or a negative error code.
 */
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif,
					    u32 cmd_flags)
{
	struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
	struct wowlan_key_data key_data = {
		.configure_keys = !unified,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
		.kek_kck_cmd = &kek_kck_cmd,
	};
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;
	u8 cmd_ver;
	size_t cmd_size;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	/*
	 * if we have to configure keys, call ieee80211_iter_keys(),
	 * as we need non-atomic context in order to take the
	 * required locks.
	 *
	 * Note that currently we don't propagate cmd_flags
	 * to the iterator. In case of key_data.configure_keys,
	 * all the configured commands are SYNC, and
	 * iwl_mvm_wowlan_program_keys() will take care of
	 * locking/unlocking mvm->mutex.
	 */
	ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
			    &key_data);

	if (key_data.error) {
		ret = -EIO;
		goto out;
	}

	if (key_data.use_rsc_tsc) {
		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
						WOWLAN_TSC_RSC_PARAM,
						IWL_FW_CMD_VER_UNKNOWN);
		int size;

		if (ver == 4) {
			size = sizeof(*key_data.rsc_tsc);
			key_data.rsc_tsc->sta_id =
				cpu_to_le32(mvmvif->ap_sta_id);
		} else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
			/* older layout has no sta_id, send only the params */
			size = sizeof(key_data.rsc_tsc->params);
		} else {
			ret = 0;
			WARN_ON_ONCE(1);
			goto out;
		}

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
					   cmd_flags, size,
					   key_data.rsc_tsc);
		if (ret)
			goto out;
	}

	if (key_data.use_tkip &&
	    !fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
						WOWLAN_TKIP_PARAM,
						IWL_FW_CMD_VER_UNKNOWN);
		int size;

		if (ver == 2) {
			size = sizeof(tkip_cmd);
			key_data.tkip->sta_id =
				cpu_to_le32(mvmvif->ap_sta_id);
		} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
			size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
		} else {
			ret = -EINVAL;
			WARN_ON_ONCE(1);
			goto out;
		}

		/* send relevant data according to CMD version */
		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM,
					   cmd_flags, size, &tkip_cmd);
		if (ret)
			goto out;
	}

	/* configure rekey data only if offloaded rekey is supported (d3) */
	if (mvmvif->rekey_data.valid) {
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						IWL_ALWAYS_LONG_GROUP,
						WOWLAN_KEK_KCK_MATERIAL,
						IWL_FW_CMD_VER_UNKNOWN);
		if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
			    cmd_ver != IWL_FW_CMD_VER_UNKNOWN)) {
			/*
			 * Fix: bail out through the cleanup path instead of
			 * returning directly, which leaked key_data.rsc_tsc.
			 */
			ret = -EINVAL;
			goto out;
		}
		if (cmd_ver == 3)
			cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
		else
			cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);

		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
		       mvmvif->rekey_data.kck_len);
		kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
		memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
		       mvmvif->rekey_data.kek_len);
		kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
		kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
		kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);

		IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
				 mvmvif->rekey_data.akm);

		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
					   cmd_size,
					   &kek_kck_cmd);
		if (ret)
			goto out;
	}
	ret = 0;
out:
	kfree(key_data.rsc_tsc);
	return ret;
}
844
/*
 * Full WoWLAN configuration for an associated vif: optionally switch to
 * the D3 firmware image and reprogram its contexts, then send key
 * material, the WOWLAN_CONFIGURATION command, wakeup patterns and
 * protocol offloads. Called with mvm->mutex held; the mutex is
 * temporarily dropped around key programming (see comment below).
 */
static int
iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
		      struct cfg80211_wowlan *wowlan,
		      struct iwl_wowlan_config_cmd *wowlan_config_cmd,
		      struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
		      struct ieee80211_sta *ap_sta)
{
	int ret;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	mvm->offload_tid = wowlan_config_cmd->offloading_tid;

	/* non-unified images need the dedicated D3 fw loaded and reprogrammed */
	if (!unified_image) {
		ret = iwl_mvm_switch_to_d3(mvm);
		if (ret)
			return ret;

		ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
		if (ret)
			return ret;
	}

	/*
	 * This needs to be unlocked due to lock ordering
	 * constraints. Since we're in the suspend path
	 * that isn't really a problem though.
	 */
	mutex_unlock(&mvm->mutex);
	ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
	mutex_lock(&mvm->mutex);
	if (ret)
		return ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(*wowlan_config_cmd),
				   wowlan_config_cmd);
	if (ret)
		return ret;

	/* pick the pattern command layout the firmware understands */
	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
		ret = iwl_mvm_send_patterns(mvm, wowlan);
	else
		ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
	if (ret)
		return ret;

	return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
}
895
/*
 * Configure the firmware for net-detect (wake on network found):
 * switch to the D3 image if needed, start a net-detect scheduled scan,
 * and keep a copy of the match sets / channel list so the wakeup
 * reason can be reported on resume.
 * Returns 0 on success or a negative error code.
 */
static int
iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
			 struct cfg80211_wowlan *wowlan,
			 struct cfg80211_sched_scan_request *nd_config,
			 struct ieee80211_vif *vif)
{
	int ret;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!unified_image) {
		ret = iwl_mvm_switch_to_d3(mvm);
		if (ret)
			return ret;
	} else {
		/* In theory, we wouldn't have to stop a running sched
		 * scan in order to start another one (for
		 * net-detect). But in practice this doesn't seem to
		 * work properly, so stop any running sched_scan now.
		 */
		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
		if (ret)
			return ret;
	}

	ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
				       IWL_MVM_SCAN_NETDETECT);
	if (ret)
		return ret;

	/* stale copies would indicate an unbalanced suspend/resume */
	if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
		return -EBUSY;

	/* save the sched scan matchsets... */
	/* NOTE(review): kmemdup() failures below are tolerated -- the scan
	 * still runs, only resume-time reporting is degraded. Presumably
	 * intentional best-effort behavior; confirm before changing.
	 */
	if (nd_config->n_match_sets) {
		mvm->nd_match_sets = kmemdup(nd_config->match_sets,
					     sizeof(*nd_config->match_sets) *
					     nd_config->n_match_sets,
					     GFP_KERNEL);
		if (mvm->nd_match_sets)
			mvm->n_nd_match_sets = nd_config->n_match_sets;
	}

	/* ...and the sched scan channels for later reporting */
	mvm->nd_channels = kmemdup(nd_config->channels,
				   sizeof(*nd_config->channels) *
				   nd_config->n_channels,
				   GFP_KERNEL);
	if (mvm->nd_channels)
		mvm->n_nd_channels = nd_config->n_channels;

	return 0;
}
949
iwl_mvm_free_nd(struct iwl_mvm * mvm)950 static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
951 {
952 kfree(mvm->nd_match_sets);
953 mvm->nd_match_sets = NULL;
954 mvm->n_nd_match_sets = 0;
955 kfree(mvm->nd_channels);
956 mvm->nd_channels = NULL;
957 mvm->n_nd_channels = 0;
958 }
959
/*
 * Core suspend handler: validate the current state, configure the
 * firmware for either WoWLAN (associated) or net-detect (not
 * associated), update power settings, and finally send D3_CONFIG_CMD
 * to switch the firmware into D3 operation before suspending the
 * transport.
 * Returns 0 on success, 1 when suspend cannot proceed without error
 * (no BSS vif / no triggers), or a negative error code.
 */
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wowlan,
			     bool test)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *vif = NULL;
	struct iwl_mvm_vif *mvmvif = NULL;
	struct ieee80211_sta *ap_sta = NULL;
	struct iwl_d3_manager_config d3_cfg_cmd_data = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct iwl_host_cmd d3_cfg_cmd = {
		.id = D3_CONFIG_CMD,
		.flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
		.data[0] = &d3_cfg_cmd_data,
		.len[0] = sizeof(d3_cfg_cmd_data),
	};
	int ret;
	int len __maybe_unused;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!wowlan) {
		/*
		 * mac80211 shouldn't get here, but for D3 test
		 * it doesn't warrant a warning
		 */
		WARN_ON(!test);
		return -EINVAL;
	}

	mutex_lock(&mvm->mutex);

	set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);

	synchronize_net();

	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif)) {
		ret = 1;
		goto out_noreset;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
		/* if we're not associated, this must be netdetect */
		if (!wowlan->nd_config) {
			ret = 1;
			goto out_noreset;
		}

		ret = iwl_mvm_netdetect_config(
			mvm, wowlan, wowlan->nd_config, vif);
		if (ret)
			goto out;

		mvm->net_detect = true;
	} else {
		struct iwl_wowlan_config_cmd wowlan_config_cmd = {};

		wowlan_config_cmd.sta_id = mvmvif->ap_sta_id;

		ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
			lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(ap_sta)) {
			ret = -EINVAL;
			goto out_noreset;
		}

		ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
						vif, mvmvif, ap_sta);
		if (ret)
			goto out_noreset;
		ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
					    vif, mvmvif, ap_sta);
		if (ret)
			goto out;

		mvm->net_detect = false;
	}

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto out;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (mvm->d3_wake_sysassert)
		d3_cfg_cmd_data.wakeup_flags |=
			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif

	/*
	 * Prior to 9000 device family the driver needs to stop the dbg
	 * recording before entering D3. In later devices the FW stops the
	 * recording automatically.
	 */
	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
		iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	/* must be last -- this switches firmware state */
	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
	if (ret)
		goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* the response carries the address of the D3 "PME" pointer */
	len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
	if (len >= sizeof(u32)) {
		mvm->d3_test_pme_ptr =
			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
	}
#endif
	iwl_free_resp(&d3_cfg_cmd);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
 out:
	if (ret < 0) {
		iwl_mvm_free_nd(mvm);

		/* non-unified images must reload the runtime fw on failure */
		if (!unified_image) {
			if (mvm->fw_restart > 0) {
				mvm->fw_restart--;
				ieee80211_restart_hw(mvm->hw);
			}
		}

		clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
	}
 out_noreset:
	mutex_unlock(&mvm->mutex);

	return ret;
}
1106
iwl_mvm_suspend(struct ieee80211_hw * hw,struct cfg80211_wowlan * wowlan)1107 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1108 {
1109 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1110
1111 iwl_mvm_pause_tcm(mvm, true);
1112
1113 iwl_fw_runtime_suspend(&mvm->fwrt);
1114
1115 return __iwl_mvm_suspend(hw, wowlan, false);
1116 }
1117
/* converted data from the different status responses */
struct iwl_wowlan_status_data {
	u16 pattern_number;		/* index of the matched wake pattern */
	u16 qos_seq_ctr[8];		/* last-used QoS seq counter per TID */
	u32 wakeup_reasons;		/* IWL_WOWLAN_WAKEUP_* bitmap */
	u32 wake_packet_length;		/* original (untruncated) packet length */
	u32 wake_packet_bufsize;	/* bytes actually present in wake_packet */
	const u8 *wake_packet;		/* points into the fw status payload */
};
1127
iwl_mvm_report_wakeup_reasons(struct iwl_mvm * mvm,struct ieee80211_vif * vif,struct iwl_wowlan_status_data * status)1128 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1129 struct ieee80211_vif *vif,
1130 struct iwl_wowlan_status_data *status)
1131 {
1132 struct sk_buff *pkt = NULL;
1133 struct cfg80211_wowlan_wakeup wakeup = {
1134 .pattern_idx = -1,
1135 };
1136 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1137 u32 reasons = status->wakeup_reasons;
1138
1139 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1140 wakeup_report = NULL;
1141 goto report;
1142 }
1143
1144 pm_wakeup_event(mvm->dev, 0);
1145
1146 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
1147 wakeup.magic_pkt = true;
1148
1149 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1150 wakeup.pattern_idx =
1151 status->pattern_number;
1152
1153 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1154 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
1155 wakeup.disconnect = true;
1156
1157 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
1158 wakeup.gtk_rekey_failure = true;
1159
1160 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1161 wakeup.rfkill_release = true;
1162
1163 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
1164 wakeup.eap_identity_req = true;
1165
1166 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
1167 wakeup.four_way_handshake = true;
1168
1169 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1170 wakeup.tcp_connlost = true;
1171
1172 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1173 wakeup.tcp_nomoretokens = true;
1174
1175 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1176 wakeup.tcp_match = true;
1177
1178 if (status->wake_packet_bufsize) {
1179 int pktsize = status->wake_packet_bufsize;
1180 int pktlen = status->wake_packet_length;
1181 const u8 *pktdata = status->wake_packet;
1182 struct ieee80211_hdr *hdr = (void *)pktdata;
1183 int truncated = pktlen - pktsize;
1184
1185 /* this would be a firmware bug */
1186 if (WARN_ON_ONCE(truncated < 0))
1187 truncated = 0;
1188
1189 if (ieee80211_is_data(hdr->frame_control)) {
1190 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
1191 int ivlen = 0, icvlen = 4; /* also FCS */
1192
1193 pkt = alloc_skb(pktsize, GFP_KERNEL);
1194 if (!pkt)
1195 goto report;
1196
1197 skb_put_data(pkt, pktdata, hdrlen);
1198 pktdata += hdrlen;
1199 pktsize -= hdrlen;
1200
1201 if (ieee80211_has_protected(hdr->frame_control)) {
1202 /*
1203 * This is unlocked and using gtk_i(c)vlen,
1204 * but since everything is under RTNL still
1205 * that's not really a problem - changing
1206 * it would be difficult.
1207 */
1208 if (is_multicast_ether_addr(hdr->addr1)) {
1209 ivlen = mvm->gtk_ivlen;
1210 icvlen += mvm->gtk_icvlen;
1211 } else {
1212 ivlen = mvm->ptk_ivlen;
1213 icvlen += mvm->ptk_icvlen;
1214 }
1215 }
1216
1217 /* if truncated, FCS/ICV is (partially) gone */
1218 if (truncated >= icvlen) {
1219 icvlen = 0;
1220 truncated -= icvlen;
1221 } else {
1222 icvlen -= truncated;
1223 truncated = 0;
1224 }
1225
1226 pktsize -= ivlen + icvlen;
1227 pktdata += ivlen;
1228
1229 skb_put_data(pkt, pktdata, pktsize);
1230
1231 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
1232 goto report;
1233 wakeup.packet = pkt->data;
1234 wakeup.packet_present_len = pkt->len;
1235 wakeup.packet_len = pkt->len - truncated;
1236 wakeup.packet_80211 = false;
1237 } else {
1238 int fcslen = 4;
1239
1240 if (truncated >= 4) {
1241 truncated -= 4;
1242 fcslen = 0;
1243 } else {
1244 fcslen -= truncated;
1245 truncated = 0;
1246 }
1247 pktsize -= fcslen;
1248 wakeup.packet = status->wake_packet;
1249 wakeup.packet_present_len = pktsize;
1250 wakeup.packet_len = pktlen - truncated;
1251 wakeup.packet_80211 = true;
1252 }
1253 }
1254
1255 report:
1256 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1257 kfree_skb(pkt);
1258 }
1259
iwl_mvm_aes_sc_to_seq(struct aes_sc * sc,struct ieee80211_key_seq * seq)1260 static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
1261 struct ieee80211_key_seq *seq)
1262 {
1263 u64 pn;
1264
1265 pn = le64_to_cpu(sc->pn);
1266 seq->ccmp.pn[0] = pn >> 40;
1267 seq->ccmp.pn[1] = pn >> 32;
1268 seq->ccmp.pn[2] = pn >> 24;
1269 seq->ccmp.pn[3] = pn >> 16;
1270 seq->ccmp.pn[4] = pn >> 8;
1271 seq->ccmp.pn[5] = pn;
1272 }
1273
iwl_mvm_tkip_sc_to_seq(struct tkip_sc * sc,struct ieee80211_key_seq * seq)1274 static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
1275 struct ieee80211_key_seq *seq)
1276 {
1277 seq->tkip.iv32 = le32_to_cpu(sc->iv32);
1278 seq->tkip.iv16 = le16_to_cpu(sc->iv16);
1279 }
1280
/*
 * Restore the per-TID AES (CCMP/GCMP) RX PNs reported by the firmware
 * into mac80211 and, on multi-RX-queue hardware (new RX API) for a
 * pairwise key, also into the driver's per-queue PN tracking.
 */
static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key)
{
	int tid;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	if (sta && iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_sta *mvmsta;
		struct iwl_mvm_key_pn *ptk_pn;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		rcu_read_lock();
		ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
		if (WARN_ON(!ptk_pn)) {
			rcu_read_unlock();
			return;
		}

		for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
			struct ieee80211_key_seq seq = {};
			int i;

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
			/* NOTE(review): loop starts at queue 1 -- queue 0
			 * presumably covered by mac80211's counter; confirm */
			for (i = 1; i < mvm->trans->num_rx_queues; i++)
				memcpy(ptk_pn->q[i].pn[tid],
				       seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
		}
		rcu_read_unlock();
	} else {
		/* single-queue path: mac80211 tracks everything */
		for (tid = 0; tid < IWL_NUM_RSC; tid++) {
			struct ieee80211_key_seq seq = {};

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
		}
	}
}
1322
iwl_mvm_set_tkip_rx_seq(struct tkip_sc * scs,struct ieee80211_key_conf * key)1323 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1324 struct ieee80211_key_conf *key)
1325 {
1326 int tid;
1327
1328 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1329
1330 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1331 struct ieee80211_key_seq seq = {};
1332
1333 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1334 ieee80211_set_key_rx_seq(key, tid, &seq);
1335 }
1336 }
1337
iwl_mvm_set_key_rx_seq(struct iwl_mvm * mvm,struct ieee80211_key_conf * key,struct iwl_wowlan_status * status)1338 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
1339 struct ieee80211_key_conf *key,
1340 struct iwl_wowlan_status *status)
1341 {
1342 union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc;
1343
1344 switch (key->cipher) {
1345 case WLAN_CIPHER_SUITE_CCMP:
1346 case WLAN_CIPHER_SUITE_GCMP:
1347 case WLAN_CIPHER_SUITE_GCMP_256:
1348 iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
1349 break;
1350 case WLAN_CIPHER_SUITE_TKIP:
1351 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1352 break;
1353 default:
1354 WARN_ON(1);
1355 }
1356 }
1357
/* state shared between the two passes of iwl_mvm_d3_update_keys() */
struct iwl_mvm_d3_gtk_iter_data {
	struct iwl_mvm *mvm;
	/* parsed fw wowlan status (RSC/TSC counters, rekey info) */
	struct iwl_wowlan_status *status;
	/* last GTK recorded during the find phase */
	void *last_gtk;
	/* cipher of last_gtk */
	u32 cipher;
	/* find_phase: only record GTKs; unhandled_cipher: abort iteration */
	bool find_phase, unhandled_cipher;
	/* count of non-WEP keys seen */
	int num_keys;
};
1366
/*
 * Key iterator callback, run in two passes (see data->find_phase):
 * pass 1 records the last configured GTK and rejects ciphers we cannot
 * handle; pass 2 updates pairwise RX/TX counters from the fw status
 * and either removes stale GTKs (after a rekey) or refreshes the RX
 * counters of the GTK recorded in pass 1.
 */
static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *_data)
{
	struct iwl_mvm_d3_gtk_iter_data *data = _data;

	if (data->unhandled_cipher)
		return;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* ignore WEP completely, nothing to do */
		return;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
	case WLAN_CIPHER_SUITE_TKIP:
		/* we support these */
		break;
	default:
		/* everything else (even CMAC for MFP) - disconnect from AP */
		data->unhandled_cipher = true;
		return;
	}

	data->num_keys++;

	/*
	 * pairwise key - update sequence counters only;
	 * note that this assumes no TDLS sessions are active
	 */
	if (sta) {
		struct ieee80211_key_seq seq = {};
		union iwl_all_tsc_rsc *sc =
			&data->status->gtk[0].rsc.all_tsc_rsc;

		if (data->find_phase)
			return;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
					       sta, key);
			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
			/* TX PN packs iv16 in the low 16 bits, iv32 above */
			atomic64_set(&key->tx_pn,
				     (u64)seq.tkip.iv16 |
				     ((u64)seq.tkip.iv32 << 16));
			break;
		}

		/* that's it for this key */
		return;
	}

	if (data->find_phase) {
		data->last_gtk = key;
		data->cipher = key->cipher;
		return;
	}

	if (data->status->num_of_gtk_rekeys)
		ieee80211_remove_key(key);
	else if (data->last_gtk == key)
		iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
}
1441
/*
 * Try to keep the connection after resume: replay the sequence/PN
 * counters the firmware maintained while in D3 and, if the AP rekeyed
 * the GTK, install the new key and notify mac80211 of the new replay
 * counter.
 *
 * Returns false when the connection cannot be kept (disconnection
 * wakeup, unhandled cipher, missing GTK, or rekey-install failure).
 */
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status *status)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.mvm = mvm,
		.status = status,
	};
	u32 disconnection_reasons =
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;

	if (!status || !vif->bss_conf.bssid)
		return false;

	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
		return false;

	/* find last GTK that we used initially, if any */
	gtkdata.find_phase = true;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);
	/* not trying to keep connections with MFP/unhandled ciphers */
	if (gtkdata.unhandled_cipher)
		return false;
	if (!gtkdata.num_keys)
		goto out;
	if (!gtkdata.last_gtk)
		return false;

	/*
	 * invalidate all other GTKs that might still exist and update
	 * the one that we used
	 */
	gtkdata.find_phase = false;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);

	IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
			 le32_to_cpu(status->num_of_gtk_rekeys));
	if (status->num_of_gtk_rekeys) {
		struct ieee80211_key_conf *key;
		struct {
			struct ieee80211_key_conf conf;
			u8 key[32];	/* room for the largest key material */
		} conf = {
			.conf.cipher = gtkdata.cipher,
			.conf.keyidx =
				iwlmvm_wowlan_gtk_idx(&status->gtk[0]),
		};
		__be64 replay_ctr;

		IWL_DEBUG_WOWLAN(mvm,
				 "Received from FW GTK cipher %d, key index %d\n",
				 conf.conf.cipher, conf.conf.keyidx);
		switch (gtkdata.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_GCMP:
			BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
			BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
			memcpy(conf.conf.key, status->gtk[0].key,
			       WLAN_KEY_LEN_CCMP);
			break;
		case WLAN_CIPHER_SUITE_GCMP_256:
			BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
			conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
			memcpy(conf.conf.key, status->gtk[0].key,
			       WLAN_KEY_LEN_GCMP_256);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
			memcpy(conf.conf.key, status->gtk[0].key, 16);
			/* leave TX MIC key zeroed, we don't use it anyway */
			memcpy(conf.conf.key +
			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
			       status->gtk[0].tkip_mic_key, 8);
			break;
		}

		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
		if (IS_ERR(key))
			return false;
		iwl_mvm_set_key_rx_seq(mvm, key, status);

		replay_ctr =
			cpu_to_be64(le64_to_cpu(status->replay_ctr));

		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
					   (void *)&replay_ctr, GFP_KERNEL);
	}

out:
	mvmvif->seqno_valid = true;
	/* +0x10 because the set API expects next-to-use, not last-used */
	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;

	return true;
}
1543
/* Occasionally, templates would be nice. This is one of those times ... */
/*
 * iwl_mvm_parse_wowlan_status_common - generate a parser for one
 * version of the WoWLAN status notification.
 *
 * Each generated function validates the response length against the
 * fixed struct size plus the (4-byte aligned) wake packet buffer,
 * allocates a driver-internal struct iwl_wowlan_status, and copies the
 * fields common to all supported notification versions.  Returns an
 * ERR_PTR() on bad length or allocation failure; the caller frees the
 * result with kfree().
 */
#define iwl_mvm_parse_wowlan_status_common(_ver)			\
static struct iwl_wowlan_status *					\
iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm,	\
					    void *_data, int len)	\
{									\
	struct iwl_wowlan_status *status;				\
	struct iwl_wowlan_status_ ##_ver *data = _data;			\
	int data_size;							\
									\
	if (len < sizeof(*data)) {					\
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");	\
		return ERR_PTR(-EIO);					\
	}								\
									\
	data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4);	\
	if (len != sizeof(*data) + data_size) {				\
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");	\
		return ERR_PTR(-EIO);					\
	}								\
									\
	status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);	\
	if (!status)							\
		return ERR_PTR(-ENOMEM);				\
									\
	/* copy all the common fields */				\
	status->replay_ctr = data->replay_ctr;				\
	status->pattern_number = data->pattern_number;			\
	status->non_qos_seq_ctr = data->non_qos_seq_ctr;		\
	memcpy(status->qos_seq_ctr, data->qos_seq_ctr,			\
	       sizeof(status->qos_seq_ctr));				\
	status->wakeup_reasons = data->wakeup_reasons;			\
	status->num_of_gtk_rekeys = data->num_of_gtk_rekeys;		\
	status->received_beacons = data->received_beacons;		\
	status->wake_packet_length = data->wake_packet_length;		\
	status->wake_packet_bufsize = data->wake_packet_bufsize;	\
	memcpy(status->wake_packet, data->wake_packet,			\
	       le32_to_cpu(status->wake_packet_bufsize));		\
									\
	return status;							\
}

/* instantiate a parser for each notification version we support */
iwl_mvm_parse_wowlan_status_common(v6)
iwl_mvm_parse_wowlan_status_common(v7)
iwl_mvm_parse_wowlan_status_common(v9)
1589
/*
 * Query the wakeup status from the firmware and convert the
 * version-specific response into the driver's common
 * struct iwl_wowlan_status layout.
 *
 * Returns an ERR_PTR() on failure; on success the caller owns the
 * returned allocation and must kfree() it.
 */
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
{
	struct iwl_wowlan_status *status;
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_WANT_SKB,
	};
	int ret, len;
	u8 notif_ver;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret);
		return ERR_PTR(ret);
	}

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);

	/* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
	notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
					    WOWLAN_GET_STATUSES, 7);

	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
		/* v6: GTK material is in a different layout, repack it */
		struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;

		status = iwl_mvm_parse_wowlan_status_common_v6(mvm,
							       cmd.resp_pkt->data,
							       len);
		if (IS_ERR(status))
			goto out_free_resp;

		BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
			     sizeof(status->gtk[0].key));
		BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) >
			     sizeof(status->gtk[0].tkip_mic_key));

		/* copy GTK info to the right place */
		memcpy(status->gtk[0].key, v6->gtk.decrypt_key,
		       sizeof(v6->gtk.decrypt_key));
		memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key,
		       sizeof(v6->gtk.tkip_mic_key));
		memcpy(&status->gtk[0].rsc, &v6->gtk.rsc,
		       sizeof(status->gtk[0].rsc));

		/* hardcode the key length to 16 since v6 only supports 16 */
		status->gtk[0].key_len = 16;

		/*
		 * The key index only uses 2 bits (values 0 to 3) and
		 * we always set bit 7 which means this is the
		 * currently used key.
		 */
		status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
	} else if (notif_ver == 7) {
		struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;

		status = iwl_mvm_parse_wowlan_status_common_v7(mvm,
							       cmd.resp_pkt->data,
							       len);
		if (IS_ERR(status))
			goto out_free_resp;

		status->gtk[0] = v7->gtk[0];
		status->igtk[0] = v7->igtk[0];
	} else if (notif_ver == 9) {
		struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;

		status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
							       cmd.resp_pkt->data,
							       len);
		if (IS_ERR(status))
			goto out_free_resp;

		status->gtk[0] = v9->gtk[0];
		status->igtk[0] = v9->igtk[0];

		status->tid_tear_down = v9->tid_tear_down;
	} else {
		IWL_ERR(mvm,
			"Firmware advertises unknown WoWLAN status response %d!\n",
			notif_ver);
		status = ERR_PTR(-EIO);
	}

out_free_resp:
	iwl_free_resp(&cmd);
	return status;
}
1681
1682 static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm * mvm)1683 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
1684 {
1685 int ret;
1686
1687 /* only for tracing for now */
1688 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1689 if (ret)
1690 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1691
1692 return iwl_mvm_send_wowlan_get_status(mvm);
1693 }
1694
1695 /* releases the MVM mutex */
/*
 * Query and report the wakeup reasons, restore the TX sequence
 * counters the firmware used while in D3, and decide whether the
 * connection can be kept.  Releases mvm->mutex on all paths.
 *
 * Returns true if the connection was kept, false if mac80211 should
 * disconnect.
 */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_wowlan_status_data status;
	struct iwl_wowlan_status *fw_status;
	int i;
	bool keep;
	struct iwl_mvm_sta *mvm_ap_sta;

	fw_status = iwl_mvm_get_wakeup_status(mvm);
	if (IS_ERR_OR_NULL(fw_status))
		goto out_unlock;

	IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
			 le32_to_cpu(fw_status->wakeup_reasons));

	/* convert the fw (little-endian) status into CPU byte order */
	status.pattern_number = le16_to_cpu(fw_status->pattern_number);
	for (i = 0; i < 8; i++)
		status.qos_seq_ctr[i] =
			le16_to_cpu(fw_status->qos_seq_ctr[i]);
	status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
	status.wake_packet_length =
		le32_to_cpu(fw_status->wake_packet_length);
	status.wake_packet_bufsize =
		le32_to_cpu(fw_status->wake_packet_bufsize);
	status.wake_packet = fw_status->wake_packet;

	/* still at hard-coded place 0 for D3 image */
	mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
	if (!mvm_ap_sta)
		goto out_free;

	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = status.qos_seq_ctr[i];
		/* firmware stores last-used value, we store next value */
		seq += 0x10;
		mvm_ap_sta->tid_data[i].seq_number = seq;
	}

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
		/* sync the hw queue pointers with the restored seq number */
		i = mvm->offload_tid;
		iwl_trans_set_q_ptrs(mvm->trans,
				     mvm_ap_sta->tid_data[i].txq_id,
				     mvm_ap_sta->tid_data[i].seq_number >> 4);
	}

	/* now we have all the data we need, unlock to avoid mac80211 issues */
	mutex_unlock(&mvm->mutex);

	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);

	keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);

	kfree(fw_status);
	return keep;

out_free:
	kfree(fw_status);
out_unlock:
	mutex_unlock(&mvm->mutex);
	return false;
}
1758
/* worst-case size of the profile match array in the fw response */
#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
			  IWL_SCAN_MAX_PROFILES)

/* results of SCAN_OFFLOAD_PROFILES_QUERY_CMD */
struct iwl_mvm_nd_query_results {
	u32 matched_profiles;	/* bitmap of profiles that matched */
	/* raw match entries; v1 or v2 layout depending on fw API */
	u8 matches[ND_QUERY_BUF_LEN];
};
1766
/*
 * Read back the scan-offload (net-detect) profile match results from
 * the firmware.  Fills @results on success; returns 0 or a negative
 * error code.
 */
static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
				struct iwl_mvm_nd_query_results *results)
{
	struct iwl_scan_offload_profiles_query *query;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
		.flags = CMD_WANT_SKB,
	};
	int ret, len;
	size_t query_len, matches_len;
	int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
		return ret;
	}

	/* response layout (v1 vs v2 match entries) depends on fw API */
	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
		query_len = sizeof(struct iwl_scan_offload_profiles_query);
		matches_len = sizeof(struct iwl_scan_offload_profile_match) *
			max_profiles;
	} else {
		query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
		matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
			max_profiles;
	}

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (len < query_len) {
		IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
		ret = -EIO;
		goto out_free_resp;
	}

	query = (void *)cmd.resp_pkt->data;

	results->matched_profiles = le32_to_cpu(query->matched_profiles);
	memcpy(results->matches, query->matches, matches_len);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
#endif

out_free_resp:
	iwl_free_resp(&cmd);
	return ret;
}
1817
iwl_mvm_query_num_match_chans(struct iwl_mvm * mvm,struct iwl_mvm_nd_query_results * query,int idx)1818 static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
1819 struct iwl_mvm_nd_query_results *query,
1820 int idx)
1821 {
1822 int n_chans = 0, i;
1823
1824 if (fw_has_api(&mvm->fw->ucode_capa,
1825 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
1826 struct iwl_scan_offload_profile_match *matches =
1827 (struct iwl_scan_offload_profile_match *)query->matches;
1828
1829 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
1830 n_chans += hweight8(matches[idx].matching_channels[i]);
1831 } else {
1832 struct iwl_scan_offload_profile_match_v1 *matches =
1833 (struct iwl_scan_offload_profile_match_v1 *)query->matches;
1834
1835 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
1836 n_chans += hweight8(matches[idx].matching_channels[i]);
1837 }
1838
1839 return n_chans;
1840 }
1841
iwl_mvm_query_set_freqs(struct iwl_mvm * mvm,struct iwl_mvm_nd_query_results * query,struct cfg80211_wowlan_nd_match * match,int idx)1842 static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
1843 struct iwl_mvm_nd_query_results *query,
1844 struct cfg80211_wowlan_nd_match *match,
1845 int idx)
1846 {
1847 int i;
1848
1849 if (fw_has_api(&mvm->fw->ucode_capa,
1850 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
1851 struct iwl_scan_offload_profile_match *matches =
1852 (struct iwl_scan_offload_profile_match *)query->matches;
1853
1854 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
1855 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
1856 match->channels[match->n_channels++] =
1857 mvm->nd_channels[i]->center_freq;
1858 } else {
1859 struct iwl_scan_offload_profile_match_v1 *matches =
1860 (struct iwl_scan_offload_profile_match_v1 *)query->matches;
1861
1862 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
1863 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
1864 match->channels[match->n_channels++] =
1865 mvm->nd_channels[i]->center_freq;
1866 }
1867 }
1868
/*
 * Build and report the net-detect wakeup information (matched SSIDs
 * and the channels they were found on) to mac80211.  Releases
 * mvm->mutex before reporting.
 */
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif)
{
	struct cfg80211_wowlan_nd_info *net_detect = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	struct iwl_mvm_nd_query_results query;
	struct iwl_wowlan_status *fw_status;
	unsigned long matched_profiles;
	u32 reasons = 0;
	int i, n_matches, ret;

	fw_status = iwl_mvm_get_wakeup_status(mvm);
	if (!IS_ERR_OR_NULL(fw_status)) {
		reasons = le32_to_cpu(fw_status->wakeup_reasons);
		kfree(fw_status);
	}

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	/* anything other than a pure non-wireless wakeup: report as-is */
	if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
		goto out;

	ret = iwl_mvm_netdetect_query_results(mvm, &query);
	if (ret || !query.matched_profiles) {
		wakeup_report = NULL;
		goto out;
	}

	matched_profiles = query.matched_profiles;
	if (mvm->n_nd_match_sets) {
		n_matches = hweight_long(matched_profiles);
	} else {
		IWL_ERR(mvm, "no net detect match information available\n");
		n_matches = 0;
	}

	net_detect = kzalloc(struct_size(net_detect, matches, n_matches),
			     GFP_KERNEL);
	if (!net_detect || !n_matches)
		goto out_report_nd;

	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
		struct cfg80211_wowlan_nd_match *match;
		int idx, n_channels = 0;

		n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);

		match = kzalloc(struct_size(match, channels, n_channels),
				GFP_KERNEL);
		if (!match)
			goto out_report_nd;

		net_detect->matches[net_detect->n_matches++] = match;

		/* We inverted the order of the SSIDs in the scan
		 * request, so invert the index here.
		 */
		idx = mvm->n_nd_match_sets - i - 1;
		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
		       match->ssid.ssid_len);

		/* skip channel info if it doesn't fit our channel list */
		if (mvm->n_nd_channels < n_channels)
			continue;

		iwl_mvm_query_set_freqs(mvm, &query, match, i);
	}

out_report_nd:
	wakeup.net_detect = net_detect;
out:
	iwl_mvm_free_nd(mvm);

	mutex_unlock(&mvm->mutex);
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);

	if (net_detect) {
		for (i = 0; i < net_detect->n_matches; i++)
			kfree(net_detect->matches[i]);
		kfree(net_detect);
	}
}
1955
/*
 * Interface iterator: schedule a resume-disconnect on every station
 * vif except the one we kept the connection on (@data).
 */
static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	if (data != vif && vif->type == NL80211_IFTYPE_STATION)
		ieee80211_resume_disconnect(vif);
}
1966
iwl_mvm_rt_status(struct iwl_trans * trans,u32 base,u32 * err_id)1967 static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
1968 {
1969 struct error_table_start {
1970 /* cf. struct iwl_error_event_table */
1971 u32 valid;
1972 __le32 err_id;
1973 } err_info;
1974
1975 if (!base)
1976 return false;
1977
1978 iwl_trans_read_mem_bytes(trans, base,
1979 &err_info, sizeof(err_info));
1980 if (err_info.valid && err_id)
1981 *err_id = le32_to_cpu(err_info.err_id);
1982
1983 return !!err_info.valid;
1984 }
1985
iwl_mvm_check_rt_status(struct iwl_mvm * mvm,struct ieee80211_vif * vif)1986 static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
1987 struct ieee80211_vif *vif)
1988 {
1989 u32 err_id;
1990
1991 /* check for lmac1 error */
1992 if (iwl_mvm_rt_status(mvm->trans,
1993 mvm->trans->dbg.lmac_error_event_table[0],
1994 &err_id)) {
1995 if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1996 struct cfg80211_wowlan_wakeup wakeup = {
1997 .rfkill_release = true,
1998 };
1999 ieee80211_report_wowlan_wakeup(vif, &wakeup,
2000 GFP_KERNEL);
2001 }
2002 return true;
2003 }
2004
2005 /* check if we have lmac2 set and check for error */
2006 if (iwl_mvm_rt_status(mvm->trans,
2007 mvm->trans->dbg.lmac_error_event_table[1], NULL))
2008 return true;
2009
2010 /* check for umac error */
2011 if (iwl_mvm_rt_status(mvm->trans,
2012 mvm->trans->dbg.umac_error_event_table, NULL))
2013 return true;
2014
2015 return false;
2016 }
2017
/*
 * Complete the resume flow after D3 (WoWLAN or net-detect) sleep.
 *
 * Checks the firmware for runtime errors, exits D3, optionally sends
 * D0I3_END_CMD first (if the firmware advertises that capability), and
 * queries the wake-up reasons.  Returns 0 when the connection state can
 * be kept as-is; any non-zero return tells mac80211 (via the caller)
 * that a HW restart / reconfiguration is required.
 *
 * @mvm:  the mvm instance being resumed
 * @test: true when invoked from the debugfs pseudo-D3 test flow
 */
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
	struct ieee80211_vif *vif = NULL;
	int ret = 1;
	enum iwl_d3_status d3_status;
	bool keep = false;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
	bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);

	mutex_lock(&mvm->mutex);

	mvm->last_reset_or_resume_time_jiffies = jiffies;

	/* get the BSS vif pointer again */
	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif))
		goto err;

	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);

	/*
	 * If the firmware crashed while we slept, collect the error
	 * dump and force a reconfiguration (ret stays 1).
	 */
	if (iwl_mvm_check_rt_status(mvm, vif)) {
		set_bit(STATUS_FW_ERROR, &mvm->trans->status);
		iwl_mvm_dump_nic_error_log(mvm);
		iwl_dbg_tlv_time_point(&mvm->fwrt,
				       IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
					false, 0);
		ret = 1;
		mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
		goto err;
	}

	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
	if (ret)
		goto err;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(mvm, "Device was reset during suspend\n");
		goto err;
	}

	if (d0i3_first) {
		struct iwl_host_cmd cmd = {
			.id = D0I3_END_CMD,
			.flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
		};
		int len;

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
				ret);
			goto err;
		}
		switch (mvm->cmd_ver.d0i3_resp) {
		case 0:
			break;
		case 1:
			len = iwl_rx_packet_payload_len(cmd.resp_pkt);
			if (len != sizeof(u32)) {
				IWL_ERR(mvm,
					"Error with D0I3_END_CMD response size (%d)\n",
					len);
				iwl_free_resp(&cmd);
				goto err;
			}
			if (IWL_D0I3_RESET_REQUIRE &
			    le32_to_cpu(*(__le32 *)cmd.resp_pkt->data))
				iwl_write32(mvm->trans, CSR_RESET,
					    CSR_RESET_REG_FLAG_FORCE_NMI);
			break;
		default:
			WARN_ON(1);
		}
		/*
		 * CMD_WANT_SKB transfers ownership of the response packet
		 * to us, so it must be freed on every path through the
		 * switch above, not only when a reset was required.
		 */
		iwl_free_resp(&cmd);
	}

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	/*
	 * Query the current location and source from the D3 firmware so we
	 * can play it back when we re-intiailize the D0 firmware
	 */
	iwl_mvm_update_changed_regdom(mvm);

	/* Re-configure PPAG settings */
	iwl_mvm_ppag_send_cmd(mvm);

	if (!unified_image)
		/* Re-configure default SAR profile */
		iwl_mvm_sar_select_profile(mvm, 1, 1);

	if (mvm->net_detect) {
		/* If this is a non-unified image, we restart the FW,
		 * so no need to stop the netdetect scan. If that
		 * fails, continue and try to get the wake-up reasons,
		 * but trigger a HW restart by keeping a failure code
		 * in ret.
		 */
		if (unified_image)
			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
						false);

		iwl_mvm_query_netdetect_reasons(mvm, vif);
		/* has unlocked the mutex, so skip that */
		goto out;
	} else {
		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (keep)
			mvm->keep_vif = vif;
#endif
		/* has unlocked the mutex, so skip that */
		goto out_iterate;
	}

err:
	iwl_mvm_free_nd(mvm);
	mutex_unlock(&mvm->mutex);

out_iterate:
	if (!test)
		ieee80211_iterate_active_interfaces_mtx(mvm->hw,
			IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);

out:
	clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);

	/* no need to reset the device in unified images, if successful */
	if (unified_image && !ret) {
		/* nothing else to do if we already sent D0I3_END_CMD */
		if (d0i3_first)
			return 0;

		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
		if (!ret)
			return 0;
	}

	/*
	 * Reconfigure the device in one of the following cases:
	 * 1. We are not using a unified image
	 * 2. We are using a unified image but had an error while exiting D3
	 */
	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);

	return 1;
}
2169
iwl_mvm_resume(struct ieee80211_hw * hw)2170 int iwl_mvm_resume(struct ieee80211_hw *hw)
2171 {
2172 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2173 int ret;
2174
2175 ret = __iwl_mvm_resume(mvm, false);
2176
2177 iwl_mvm_resume_tcm(mvm);
2178
2179 iwl_fw_runtime_resume(&mvm->fwrt);
2180
2181 return ret;
2182 }
2183
/*
 * mac80211 set_wakeup callback: forward the WoWLAN wakeup enable/disable
 * request to the device power-management core.
 */
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	device_set_wakeup_enable(IWL_MAC80211_GET_MVM(hw)->trans->dev,
				 enabled);
}
2190
2191 #ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * debugfs open handler that starts the pseudo-D3 test: pauses TCM,
 * suspends the FW runtime and enters D3 without a real system suspend.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the error path below, TCM stays paused and the FW
 * runtime stays suspended (no matching resume calls) — presumably
 * acceptable since the test flow failed, but worth confirming.
 */
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int err;

	/* only one pseudo-D3 test session may run at a time */
	if (mvm->d3_test_active)
		return -EBUSY;

	file->private_data = inode->i_private;

	iwl_mvm_pause_tcm(mvm, true);

	iwl_fw_runtime_suspend(&mvm->fwrt);

	/* start pseudo D3 */
	rtnl_lock();
	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	rtnl_unlock();
	/* suspend returns positive values too; normalize them to -EINVAL */
	if (err > 0)
		err = -EINVAL;
	if (err)
		return err;

	mvm->d3_test_active = true;
	/* no vif kept yet; set by the resume path if a connection survives */
	mvm->keep_vif = NULL;
	return 0;
}
2219
iwl_mvm_d3_test_read(struct file * file,char __user * user_buf,size_t count,loff_t * ppos)2220 static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
2221 size_t count, loff_t *ppos)
2222 {
2223 struct iwl_mvm *mvm = file->private_data;
2224 u32 pme_asserted;
2225
2226 while (true) {
2227 /* read pme_ptr if available */
2228 if (mvm->d3_test_pme_ptr) {
2229 pme_asserted = iwl_trans_read_mem32(mvm->trans,
2230 mvm->d3_test_pme_ptr);
2231 if (pme_asserted)
2232 break;
2233 }
2234
2235 if (msleep_interruptible(100))
2236 break;
2237 }
2238
2239 return 0;
2240 }
2241
/*
 * Interface iterator used after the pseudo-D3 test: signal connection
 * loss on every station interface except the one we kept connected
 * (passed through _data).
 */
static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	/* skip the one we keep connection on */
	if (_data == vif)
		return;

	ieee80211_connection_loss(vif);
}
2252
/*
 * debugfs release handler that ends the pseudo-D3 test: resumes the
 * device, restores TCM and the FW runtime, and — for non-unified
 * images — restarts the HW and waits (up to ~10s) for the restart to
 * complete before disconnecting the interfaces that did not survive.
 */
static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	/* allow a new test session to be opened */
	mvm->d3_test_active = false;

	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);

	/* pseudo-resume mirrors the rtnl locking of the open/suspend path */
	rtnl_lock();
	__iwl_mvm_resume(mvm, true);
	rtnl_unlock();

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	/* unblock anyone still waiting on firmware notifications */
	iwl_abort_notification_waits(&mvm->notif_wait);
	if (!unified_image) {
		/* ~10 x 1s polling budget for the HW restart below */
		int remaining_time = 10;

		ieee80211_restart_hw(mvm->hw);

		/* wait for restart and disconnect all interfaces */
		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		       remaining_time > 0) {
			remaining_time--;
			msleep(1000);
		}

		if (remaining_time == 0)
			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
	}

	/* drop every station connection except the vif kept by resume */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	return 0;
}
2294
/* debugfs file operations for the pseudo-D3 (WoWLAN) test flow */
const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
2301 #endif
2302