// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"

struct iwl_mvm_loc_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 lci_len, civic_len;
	u8 buf[];
};

struct iwl_mvm_smooth_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	s64 rtt_avg;
	u64 host_time;
};

struct iwl_mvm_ftm_pasn_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 hltk[HLTK_11AZ_LEN];
	u8 tk[TK_11AZ_LEN];
	u8 cipher;
	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
};

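/*
 * Register a PASN responder for secured (802.11az) ranging: store the HLTK,
 * the location cipher and, unless the peer is the associated AP with an MFP
 * key already installed, the TK, so they can be applied to later range
 * requests against this address.
 */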
int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
			     u8 *hltk, u32 hltk_len)
{
	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
						      GFP_KERNEL);
	u32 expected_tk_len;

	lockdep_assert_held(&mvm->mutex);

	if (!pasn)
		return -ENOBUFS;

	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);

	switch (pasn->cipher) {
	case IWL_LOCATION_CIPHER_CCMP_128:
	case IWL_LOCATION_CIPHER_GCMP_128:
		expected_tk_len = WLAN_KEY_LEN_CCMP;
		break;
	case IWL_LOCATION_CIPHER_GCMP_256:
		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
		break;
	default:
		goto out;
	}

	/*
	 * If associated to this AP and already have security context,
	 * the TK is already configured for this station, so it
	 * shouldn't be set again here.
	 */
	if (vif->bss_conf.assoc &&
	    !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
		if (!IS_ERR_OR_NULL(sta) && sta->mfp)
			expected_tk_len = 0;
		rcu_read_unlock();
	}

	if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) {
		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
			tk_len, hltk_len);
		goto out;
	}

	memcpy(pasn->addr, addr, sizeof(pasn->addr));
	memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));

	if (tk && tk_len)
		memcpy(pasn->tk, tk, sizeof(pasn->tk));

	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
	return 0;
out:
	kfree(pasn);
	return -EINVAL;
}

void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
{
	struct iwl_mvm_ftm_pasn_entry *entry, *prev;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
				 list) {
		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
			continue;

		list_del(&entry->list);
		kfree(entry);
		return;
	}
}

static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
	struct iwl_mvm_loc_entry *e, *t;

	mvm->ftm_initiator.req = NULL;
	mvm->ftm_initiator.req_wdev = NULL;
	memset(mvm->ftm_initiator.responses, 0,
	       sizeof(mvm->ftm_initiator.responses));

	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}

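/*
 * Called when the firmware is restarted while a measurement request is
 * pending: report a failure for every requested peer and complete the
 * request so userspace is not left waiting, then clear the initiator state.
 */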
void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);
		result.ftm.burst_index = mvm->ftm_initiator.responses[i];

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
			       mvm->ftm_initiator.req, GFP_KERNEL);
	iwl_mvm_ftm_reset(mvm);
}

void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
{
	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);

	IWL_DEBUG_INFO(mvm,
		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
		       IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
}

void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
{
	struct iwl_mvm_smooth_entry *se, *st;

	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
				 list) {
		list_del(&se->list);
		kfree(se);
	}
}

static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
	switch (s) {
	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
		return 0;
	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
		return -EBUSY;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v5 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* use maximum for "no timeout" or bigger than what we can do */
	if (!req->timeout || req->timeout > 255 * 100)
		cmd->req_timeout = 255;
	else
		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);

	/*
	 * We treat it always as random, since if not we'll
	 * have filled our local address there instead.
	 */
	cmd->macaddr_random = 1;
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->bss_conf.assoc)
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->range_req_bssid);
}

static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd_v9 *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

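	/*
	 * cfg80211 sets mac_addr_mask bits for the address bits that are
	 * fixed by the caller; the firmware mask marks the bits it should
	 * randomize, hence the bitwise inversion below.
	 */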
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->bss_conf.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mvm_vif *mvmvif =
					iwl_mvm_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v8 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}

static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *bandwidth,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		*bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		*bandwidth = IWL_TOF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		*bandwidth = IWL_TOF_BW_80;
		break;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *format_bw,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;
	u8 cmd_ver;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						IWL_FW_CMD_VER_UNKNOWN);

		if (cmd_ver >= 13) {
			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
			break;
		}
		fallthrough;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v2 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->measure_type = 0; /* regular two-sided FTM */
	target->retries_per_sample = peer->ftm.ftmr_retries;
	target->asap_mode = peer->ftm.asap;
	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;

	if (peer->ftm.request_lci)
		target->location_req |= IWL_TOF_LOC_LCI;
	if (peer->ftm.request_civicloc)
		target->location_req |= IWL_TOF_LOC_CIVIC;

	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;

	return 0;
}

#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      struct iwl_tof_range_req_ap_entry_v6 *target)
{
	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->ftmr_max_retries = peer->ftm.ftmr_retries;
	target->initiator_ap_flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_PUT_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_PUT_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_PUT_FLAG(CIVIC_REQUEST);

	if (IWL_MVM_FTM_INITIATOR_DYNACK)
		FTM_PUT_FLAG(DYN_ACK);

	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_PUT_FLAG(ALGO_LR);
	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_PUT_FLAG(ALGO_FFT);

	if (peer->ftm.trigger_based)
		FTM_PUT_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_PUT_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_PUT_FLAG(LMR_FEEDBACK);
}

static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v3 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	/*
	 * Versions 3 and 4 have some common fields, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
	 */
	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v4 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry_v6 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, target);

	if (vif->bss_conf.assoc &&
	    !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;

		rcu_read_lock();

		sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
		if (!IS_ERR_OR_NULL(sta) && sta->mfp &&
		    (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
			FTM_PUT_FLAG(PMF);

		rcu_read_unlock();

		target->sta_id = mvmvif->ap_sta_id;
	} else {
		target->sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);
	return 0;
}

static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
	u32 status;
	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);

	if (!err && status) {
		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
			status);
		err = iwl_ftm_range_request_status_to_err(status);
	}

	return err;
}

static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v5 cmd_v5;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v5,
		.len[0] = sizeof(cmd_v5),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);

	for (i = 0; i < cmd_v5.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v7 cmd_v7;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v7,
		.len[0] = sizeof(cmd_v7),
	};
	u8 i;
	int err;

	/*
	 * Versions 7 and 8 have the same structure except for the responders
	 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
	 */
	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);

	for (i = 0; i < cmd_v7.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v8 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v9 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

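/*
 * Key iterator for ieee80211_iter_keys(): copies the TK of the key whose
 * station matches the target BSSID into the range request entry, along with
 * the matching location cipher.
 */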
static void iter(struct ieee80211_hw *hw,
		 struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta,
		 struct ieee80211_key_conf *key,
		 void *data)
{
	struct iwl_tof_range_req_ap_entry_v6 *target = data;

	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
		return;

	WARN_ON(!sta->mfp);

	if (WARN_ON(key->keylen > sizeof(target->tk)))
		return;

	memcpy(target->tk, key->key, key->keylen);
	target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
	WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
}

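/*
 * For trigger-based/non-trigger-based (802.11az) targets, look up a PASN
 * entry matching the BSSID and fill in the cipher, HLTK, TK and packet
 * numbers. If we are associated to the target, the TK is taken from the
 * installed mac80211 key instead of the stored PASN TK.
 */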
static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct iwl_tof_range_req_ap_entry_v7 *target)
{
	struct iwl_mvm_ftm_pasn_entry *entry;
	u32 flags = le32_to_cpu(target->initiator_ap_flags);

	if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
		       IWL_INITIATOR_AP_FLAGS_TB)))
		return;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
			continue;

		target->cipher = entry->cipher;
		memcpy(target->hltk, entry->hltk, sizeof(target->hltk));

		if (vif->bss_conf.assoc &&
		    !memcmp(vif->bss_conf.bssid, target->bssid,
			    sizeof(target->bssid)))
			ieee80211_iter_keys(mvm->hw, vif, iter, target);
		else
			memcpy(target->tk, entry->tk, sizeof(target->tk));

		memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
		memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));

		target->initiator_ap_flags |=
			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
		return;
	}
}

static int
iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v7 *target)
{
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);

	if (err)
		return err;

	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
	return err;
}

static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v11 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

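/*
 * Fill the 802.11az NDP ranging parameters for both directions: maximum LTF
 * repetitions, maximum spatial streams (capped so that no more than the
 * 2 STS supported on Tx are requested for I2R) and maximum total LTFs.
 * R2I is responder-to-initiator, I2R is initiator-to-responder.
 */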
static void
iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
			   struct iwl_tof_range_req_ap_entry_v8 *target)
{
	/* Only 2 STS are supported on Tx */
	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
}

static int
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v8 *target)
{
	u32 flags;
	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);

	if (ret)
		return ret;

	iwl_mvm_ftm_set_ndp_params(mvm, target);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
	    !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
		flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v12 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v13 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
		if (err)
			return err;

		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
			target->bss_color = peer->ftm.bss_color;

		if (peer->ftm.non_trigger_based) {
			target->min_time_between_msr =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
			target->burst_period =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
		} else {
			target->min_time_between_msr = cpu_to_le16(0);
		}

		target->band =
			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->ftm_initiator.req)
		return -EBUSY;

	if (new_api) {
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						   IWL_FW_CMD_VER_UNKNOWN);

		switch (cmd_ver) {
		case 13:
			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
			break;
		case 12:
			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
			break;
		case 11:
			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
			break;
		case 9:
		case 10:
			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
			break;
		case 8:
			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
			break;
		default:
			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
			break;
		}
	} else {
		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
	}

	if (!err) {
		mvm->ftm_initiator.req = req;
		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return err;
}

void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.request_id = req->cookie,
	};

	lockdep_assert_held(&mvm->mutex);

	if (req != mvm->ftm_initiator.req)
		return;

	iwl_mvm_ftm_reset(mvm);

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "failed to abort FTM process\n");
}

static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

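/*
 * Convert a firmware GP2 timestamp (microseconds) to a host boottime
 * timestamp in nanoseconds: sample GP2 and boottime together, then subtract
 * the elapsed GP2 delta (accounting for 32-bit wrap-around) from the current
 * boottime.
 */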
static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
{
	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
	u32 curr_gp2, diff;
	u64 now_from_boot_ns;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
			      &now_from_boot_ns, NULL);

	if (curr_gp2 >= gp2_ts)
		diff = curr_gp2 - gp2_ts;
	else
		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);

	return now_from_boot_ns - (u64)diff * 1000;
}

static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_loc_entry *entry;

	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
			continue;

		if (entry->lci_len) {
			res->ftm.lci_len = entry->lci_len;
			res->ftm.lci = entry->buf;
		}

		if (entry->civic_len) {
			res->ftm.civicloc_len = entry->civic_len;
			res->ftm.civicloc = entry->buf + entry->lci_len;
		}

		/* we found the entry we needed */
		break;
	}
}

static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
					u8 num_of_aps)
{
	lockdep_assert_held(&mvm->mutex);

	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
			request_id, (u8)mvm->ftm_initiator.req->cookie);
		return -EINVAL;
	}

	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
		IWL_ERR(mvm, "FTM range response invalid\n");
		return -EINVAL;
	}

	return 0;
}

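/*
 * Smooth the reported RTT with an exponential moving average kept per
 * responder: rtt_avg = (alpha * rtt + (100 - alpha) * prev_avg) / 100.
 * A stale average (older than the configured age) is replaced by the new
 * sample, and the reported RTT is only overridden by the average when it
 * under/overshoots the average by more than the configured thresholds.
 */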
static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_smooth_entry *resp;
	s64 rtt_avg, rtt = res->ftm.rtt_avg;
	u32 undershoot, overshoot;
	u8 alpha;
	bool found;

	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
		return;

	WARN_ON(rtt < 0);

	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
		IWL_DEBUG_INFO(mvm,
			       "%pM: ignore failed measurement. Status=%u\n",
			       res->addr, res->status);
		return;
	}

	found = false;
	list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) {
		if (!memcmp(res->addr, resp->addr, ETH_ALEN)) {
			found = true;
			break;
		}
	}

	if (!found) {
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!resp)
			return;

		memcpy(resp->addr, res->addr, ETH_ALEN);
		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);

		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	if (res->host_time - resp->host_time >
	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	/* Smooth the results based on the tracked RTT average */
	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;

	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);

	IWL_DEBUG_INFO(mvm,
		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
		       resp->addr, resp->rtt_avg, rtt_avg, rtt);

	/*
	 * update the responder's average RTT results regardless of
	 * the under/over shoot logic below
	 */
	resp->rtt_avg = rtt_avg;

	/* smooth the results */
	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
		res->ftm.rtt_avg = rtt_avg;

		IWL_DEBUG_INFO(mvm,
			       "undershoot: val=%lld\n",
			       (rtt_avg - rtt));
	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
		   overshoot) {
		res->ftm.rtt_avg = rtt_avg;
		IWL_DEBUG_INFO(mvm,
			       "overshoot: val=%lld\n",
			       (rtt - rtt_avg));
	}

update_time:
	resp->host_time = res->host_time;
}

static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
				     struct cfg80211_pmsr_result *res)
{
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}

static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
{
	struct iwl_mvm_ftm_pasn_entry *entry;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
			continue;

		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
		return;
	}
}

static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
{
	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
		return 5;

	/* Starting from version 8, the FW advertises the version */
	if (mvm->cmd_ver.range_resp >= 8)
		return mvm->cmd_ver.range_resp;
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
		return 7;

	/* The first version of the new range request API */
	return 6;
}

static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
	switch (ver) {
	case 9:
	case 8:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
	case 7:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
	case 6:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
	case 5:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
	default:
		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
		return false;
	}
}

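/*
 * Handle the range response notification from the firmware: validate the
 * notification size against the advertised version, translate each AP entry
 * into a cfg80211_pmsr_result (attaching cached LCI/civic data and applying
 * RTT smoothing), report it to cfg80211, and complete the request once the
 * last report of the batch has been received.
 */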
void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
	int i;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	u8 num_of_aps, last_in_batch;
	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
		return;

	if (new_api) {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
						 fw_resp_v8->num_of_aps))
			return;

		num_of_aps = fw_resp_v8->num_of_aps;
		last_in_batch = fw_resp_v8->last_report;
	} else {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
						 fw_resp_v5->num_of_aps))
			return;

		num_of_aps = fw_resp_v5->num_of_aps;
		last_in_batch = fw_resp_v5->last_in_batch;
	}

	IWL_DEBUG_INFO(mvm, "Range response received\n");
	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n",
		       mvm->ftm_initiator.req->cookie, num_of_aps);

	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
		int peer_idx;

		if (new_api) {
			if (notif_ver >= 8) {
				fw_ap = &fw_resp_v8->ap[i];
				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
			} else if (notif_ver == 7) {
				fw_ap = (void *)&fw_resp_v7->ap[i];
			} else {
				fw_ap = (void *)&fw_resp_v6->ap[i];
			}

			result.final = fw_ap->last_burst;
			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
			result.ap_tsf_valid = 1;
		} else {
			/* the first part is the same for old and new APIs */
			fw_ap = (void *)&fw_resp_v5->ap[i];
			/*
			 * FIXME: the firmware needs to report this, we don't
			 * even know the number of bursts the responder picked
			 * (if we asked it to)
			 */
			result.final = 0;
		}

		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mvm,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
							     fw_ap->timestamp);
		result.type = NL80211_PMSR_TYPE_FTM;
		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
		mvm->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		iwl_mvm_ftm_get_lci_civic(mvm, &result);

		iwl_mvm_ftm_rtt_smoothing(mvm, &result);

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
				       fw_ap->rttConfidence);

		iwl_mvm_debug_range_resp(mvm, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
				       mvm->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mvm_ftm_reset(mvm);
	}
}

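/*
 * Handle the LCI/civic location notification: parse the measurement report
 * elements out of the FTM action frame and cache them per BSSID so they can
 * be attached to the corresponding range response results.
 */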
void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
	size_t len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_loc_entry *entry;
	const u8 *ies, *lci, *civic, *msr_ie;
	size_t ies_len, lci_len = 0, civic_len = 0;
	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
			 sizeof(mgmt->u.action.u.ftm);
	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;

	if (len <= baselen)
		return;

	lockdep_assert_held(&mvm->mutex);

	ies = mgmt->u.action.u.ftm.variable;
	ies_len = len - baselen;

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_lci, 1, 4);
	if (msr_ie) {
		lci = msr_ie + 2;
		lci_len = msr_ie[1];
	}

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_civic, 1, 4);
	if (msr_ie) {
		civic = msr_ie + 2;
		civic_len = msr_ie[1];
	}

	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
	if (!entry)
		return;

	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);

	entry->lci_len = lci_len;
	if (lci_len)
		memcpy(entry->buf, lci, lci_len);

	entry->civic_len = civic_len;
	if (civic_len)
		memcpy(entry->buf + lci_len, civic, civic_len);

	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
}