// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021-2023 Intel Corporation
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mei_cl_bus.h>
#include <linux/rcupdate.h>
#include <linux/debugfs.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <net/cfg80211.h>

#include "internal.h"
#include "iwl-mei.h"
#include "trace.h"
#include "trace-data.h"
#include "sap.h"

MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
MODULE_LICENSE("GPL");

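/* UUID of the WLAN (SAP) client that the CSME firmware exposes on the MEI bus */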
#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
			      0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)

/* After CSME takes ownership, it won't release it for 60 seconds to avoid
 * frequent ownership transitions.
 */
#define MEI_OWNERSHIP_RETAKE_TIMEOUT_MS	msecs_to_jiffies(60000)

/*
 * Since iwlwifi calls iwlmei without any context, hold a pointer to the
 * mei_cl_device structure here.
 * Define a mutex that will synchronize all the flows between iwlwifi and
 * iwlmei.
 * Note that iwlmei can't have several instances, so it is OK to have static
 * variables here.
 */
static struct mei_cl_device *iwl_mei_global_cldev;
static DEFINE_MUTEX(iwl_mei_mutex);
static unsigned long iwl_mei_status;

enum iwl_mei_status_bits {
	IWL_MEI_STATUS_SAP_CONNECTED,
};

bool iwl_mei_is_connected(void)
{
	return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
}
EXPORT_SYMBOL_GPL(iwl_mei_is_connected);

#define SAP_VERSION	3
#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */

struct iwl_sap_q_ctrl_blk {
	__le32 wr_ptr;
	__le32 rd_ptr;
	__le32 size;
};

enum iwl_sap_q_idx {
	SAP_QUEUE_IDX_NOTIF = 0,
	SAP_QUEUE_IDX_DATA,
	SAP_QUEUE_IDX_MAX,
};

struct iwl_sap_dir {
	__le32 reserved;
	struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
};

enum iwl_sap_dir_idx {
	SAP_DIRECTION_HOST_TO_ME = 0,
	SAP_DIRECTION_ME_TO_HOST,
	SAP_DIRECTION_MAX,
};

struct iwl_sap_shared_mem_ctrl_blk {
	__le32 sap_id;
	__le32 size;
	struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
};

/*
 * The shared area has the following layout (the notif queue of each
 * direction comes first, since SAP_QUEUE_IDX_NOTIF = 0):
 *
 * +-----------------------------------+
 * |struct iwl_sap_shared_mem_ctrl_blk |
 * +-----------------------------------+
 * |Host -> ME notif queue             |
 * +-----------------------------------+
 * |Host -> ME data queue              |
 * +-----------------------------------+
 * |ME -> Host notif queue             |
 * +-----------------------------------+
 * |ME -> Host data queue              |
 * +-----------------------------------+
 * |SAP control block id (SAP!)        |
 * +-----------------------------------+
 */

#define SAP_H2M_DATA_Q_SZ	48256
#define SAP_M2H_DATA_Q_SZ	24128
#define SAP_H2M_NOTIF_Q_SZ	2240
#define SAP_M2H_NOTIF_Q_SZ	62720

#define _IWL_MEI_SAP_SHARED_MEM_SZ \
	(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
	 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
	 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)

#define IWL_MEI_SAP_SHARED_MEM_SZ \
	(roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))

struct iwl_mei_shared_mem_ptrs {
	struct iwl_sap_shared_mem_ctrl_blk *ctrl;
	void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
	size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
};

struct iwl_mei_filters {
	struct rcu_head rcu_head;
	struct iwl_sap_oob_filters filters;
};

/**
 * struct iwl_mei - holds the private data for iwl_mei
 *
 * @get_nvm_wq: the wait queue for the get_nvm flow
 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
 *	message. Used so that we can send CHECK_SHARED_AREA from atomic
 *	contexts.
 * @get_ownership_wq: the wait queue for the get_ownership flow
 * @shared_mem: the memory that is shared between CSME and the host
 * @cldev: the pointer to the MEI client device
 * @nvm: the data returned by the CSME for the NVM
 * @filters: the filters sent by CSME
 * @got_ownership: true if we own the device
 * @amt_enabled: true if CSME has wireless enabled
 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
 *	bus, but rather need to wait until send_csa_msg_wk runs
 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
 *	to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
 *	flow.
 * @link_prot_state: true when we are in link protection PASSIVE
 * @device_down: true if the device is down. Used to remember to send
 *	CSME_OWNERSHIP_CONFIRMED when the driver is already down.
 * @csa_throttle_end_wk: used when &csa_throttled is true
 * @pldr_wq: the wait queue for PLDR flow
 * @pldr_active: PLDR flow is in progress
 * @data_q_lock: protects the access to the data queues which are
 *	accessed without the mutex.
 * @netdev_work: used to defer registering and unregistering of the netdev to
 *	avoid taking the rtnl lock in the SAP messages handlers.
 * @ownership_dwork: used to re-ask for NIC ownership after ownership was taken
 *	by CSME or when a previous ownership request failed.
 * @sap_seq_no: the sequence number for the SAP messages queued in the shared area
 * @seq_no: the sequence number for the SAP messages sent over the MEI bus
 * @dbgfs_dir: the debugfs dir entry
 */
struct iwl_mei {
	wait_queue_head_t get_nvm_wq;
	struct work_struct send_csa_msg_wk;
	wait_queue_head_t get_ownership_wq;
	struct iwl_mei_shared_mem_ptrs shared_mem;
	struct mei_cl_device *cldev;
	struct iwl_mei_nvm *nvm;
	struct iwl_mei_filters __rcu *filters;
	bool got_ownership;
	bool amt_enabled;
	bool csa_throttled;
	bool csme_taking_ownership;
	bool link_prot_state;
	bool device_down;
	struct delayed_work csa_throttle_end_wk;
	wait_queue_head_t pldr_wq;
	bool pldr_active;
	spinlock_t data_q_lock;
	struct work_struct netdev_work;
	struct delayed_work ownership_dwork;

	atomic_t sap_seq_no;
	atomic_t seq_no;

	struct dentry *dbgfs_dir;
};

/**
 * struct iwl_mei_cache - cache for the parameters from iwlwifi
 * @ops: Callbacks to iwlwifi.
 * @netdev: The netdev that will be used to transmit / receive packets.
 * @conn_info: The connection info message triggered by iwlwifi's association.
 * @power_limit: pointer to an array of 10 elements (le16) representing the
 *	power restrictions per chain.
 * @rf_kill: rf kill state.
 * @mcc: MCC info
 * @mac_address: interface MAC address.
 * @nvm_address: NVM MAC address.
 * @priv: A pointer to iwlwifi.
 *
 * This is used to cache the configuration coming from iwlwifi. The data is
 * cached here so that we can buffer the configuration even if we don't yet
 * have a binding on the MEI bus and hence no iwl_mei structure.
 */
struct iwl_mei_cache {
	const struct iwl_mei_ops *ops;
	struct net_device __rcu *netdev;
	const struct iwl_sap_notif_connection_info *conn_info;
	const __le16 *power_limit;
	u32 rf_kill;
	u16 mcc;
	u8 mac_address[6];
	u8 nvm_address[6];
	void *priv;
};

static struct iwl_mei_cache iwl_mei_cache = {
	.rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
};

static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	if (mei_cldev_dma_unmap(cldev))
		dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
	memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
}

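/* ID of the DMA buffer that backs the WLAN shared area on the MEI bus */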
#define HBM_DMA_BUF_ID_WLAN 1

static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;

	mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
				      IWL_MEI_SAP_SHARED_MEM_SZ);

	if (IS_ERR(mem->ctrl)) {
		int ret = PTR_ERR(mem->ctrl);

		mem->ctrl = NULL;

		return ret;
	}

	memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);

	return 0;
}

static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
{
	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
	struct iwl_sap_dir *h2m;
	struct iwl_sap_dir *m2h;
	int dir, queue;
	u8 *q_head;

	mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);

	mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));

	h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];

	h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
		cpu_to_le32(SAP_H2M_DATA_Q_SZ);
	h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
		cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
	m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
		cpu_to_le32(SAP_M2H_DATA_Q_SZ);
	m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
		cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);

	/* q_head points to the start of the first queue */
	q_head = (void *)(mem->ctrl + 1);

	/* Initialize the queue heads */
	for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
		for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
			mem->q_head[dir][queue] = q_head;
			q_head +=
				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
			mem->q_size[dir][queue] =
				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
		}
	}

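	/* Close the layout drawn above: the SAP! control block ID follows the last queue */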
	*(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
}

static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
					struct iwl_sap_q_ctrl_blk *notif_q,
					u8 *q_head,
					const struct iwl_sap_hdr *hdr,
					u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	size_t room_in_buf;
	size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the end of the buffer\n");
		return -EINVAL;
	}

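	/*
	 * Free space in the cyclic buffer: when the write pointer is at or
	 * past the read pointer, the free area wraps around the end of the
	 * queue; otherwise it is the gap between the two pointers.
	 */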
	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&cldev->dev,
			"Not enough room in the buffer\n");
		return -ENOSPC;
	}

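	/* Copy the message, splitting it in two when it wraps past the end of the queue */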
	if (wr + tx_sz <= q_sz) {
		memcpy(q_head + wr, hdr, tx_sz);
	} else {
		memcpy(q_head + wr, hdr, q_sz - wr);
		memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
	return 0;
}

static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];

	if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
		return true;

	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
}

static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
	};
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

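	/*
	 * A CHECK_SHARED_AREA message was sent less than 100ms ago: if data
	 * is still pending by then, csa_throttle_end_wk will send a new one.
	 */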
	if (mei->csa_throttled)
		return 0;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
			ret);
		return ret;
	}

	mei->csa_throttled = true;

	schedule_delayed_work(&mei->csa_throttle_end_wk,
			      msecs_to_jiffies(100));

	return 0;
}

static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);

	mutex_lock(&iwl_mei_mutex);

	mei->csa_throttled = false;

	if (iwl_mei_host_to_me_data_pending(mei))
		iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
					struct iwl_sap_hdr *hdr)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

	if (!mei->shared_mem.ctrl) {
		dev_err(&cldev->dev,
			"No shared memory, can't send any SAP message\n");
		return -EINVAL;
	}

	if (!iwl_mei_is_connected()) {
		dev_err(&cldev->dev,
			"Can't send a SAP message if we're not connected\n");
		return -ENODEV;
	}

	hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
	dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);

	if (ret < 0)
		return ret;

	trace_iwlmei_sap_cmd(hdr, true);

	return iwl_mei_send_check_shared_area(cldev);
}

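/*
 * Copy an skb to the host-to-ME data queue. @cb_tx is true for Tx packets
 * that the driver diverts to CSME (e.g. DHCP), which carry the larger
 * struct iwl_sap_cb_data header instead of a plain struct iwl_sap_hdr.
 */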
void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	struct iwl_mei *mei;
	size_t room_in_buf;
	size_t tx_sz;
	size_t hdr_sz;
	u32 q_sz;
	u32 rd;
	u32 wr;
	u8 *q_head;

	if (!iwl_mei_global_cldev)
		return;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	/*
	 * We access this path for Rx packets (the more common case) and from
	 * the Tx path when we send DHCP packets; the latter is very
	 * unlikely.
	 * Take the lock already here to make sure we see that remove()
	 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 */
	spin_lock_bh(&mei->data_q_lock);

	if (!iwl_mei_is_connected()) {
		spin_unlock_bh(&mei->data_q_lock);
		return;
	}

	/*
	 * We are in an RCU critical section and the remove from the CSME bus
	 * which would free this memory waits for the readers to complete (this
	 * is done in netdev_rx_handler_unregister).
	 */
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];

	rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
			 sizeof(struct iwl_sap_hdr);
	tx_sz = skb->len + hdr_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&mei->cldev->dev,
			"can't write the data: pointers are past the end of the buffer\n");
		goto out;
	}

	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough room in the buffer for this data\n");
		goto out;
	}

	if (skb_headroom(skb) < hdr_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough headroom in the skb to write the SAP header\n");
		goto out;
	}

	if (cb_tx) {
		struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));

		memset(cb_hdr, 0, sizeof(*cb_hdr));
		cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
		cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
		cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
		cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
	} else {
		struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));

		hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
		hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
		hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
	}

	if (wr + tx_sz <= q_sz) {
		skb_copy_bits(skb, 0, q_head + wr, tx_sz);
	} else {
		skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
		skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));

out:
	spin_unlock_bh(&mei->data_q_lock);
}

static int
iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
{
	struct iwl_sap_hdr msg = {
		.type = cpu_to_le16(type),
	};

	return iwl_mei_send_sap_msg_payload(cldev, &msg);
}

static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, send_csa_msg_wk);

	if (!iwl_mei_is_connected())
		return;

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

/* Called in an RCU read critical section from netif_receive_skb */
static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct iwl_mei *mei =
		rcu_dereference(skb->dev->rx_handler_data);
	struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
	bool rx_for_csme = false;
	rx_handler_result_t res;

	/*
	 * remove() unregisters this handler and calls synchronize_net(), so this
	 * should never happen.
	 */
	if (!iwl_mei_is_connected()) {
		dev_err(&mei->cldev->dev,
			"Got an Rx packet, but we're not connected to SAP?\n");
		return RX_HANDLER_PASS;
	}

	if (filters)
		res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
	else
		res = RX_HANDLER_PASS;

	/*
	 * The data is already on the ring of the shared area, all we
	 * need to do is to tell the CSME firmware to check what we have
	 * there.
	 */
	if (rx_for_csme)
		schedule_work(&mei->send_csa_msg_wk);

	if (res != RX_HANDLER_PASS) {
		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
		dev_kfree_skb(skb);
	}

	return res;
}

static void iwl_mei_netdev_work(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, netdev_work);
	struct net_device *netdev;

	/*
	 * First take rtnl and only then the mutex to avoid an ABBA
	 * with iwl_mei_set_netdev()
	 */
	rtnl_lock();
	mutex_lock(&iwl_mei_mutex);

	netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
					   lockdep_is_held(&iwl_mei_mutex));
	if (netdev) {
		if (mei->amt_enabled)
			netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
						   mei);
		else
			netdev_rx_handler_unregister(netdev);
	}

	mutex_unlock(&iwl_mei_mutex);
	rtnl_unlock();
}

static void
iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
			   const struct iwl_sap_me_msg_start_ok *rsp,
			   ssize_t len)
{
	if (len != sizeof(*rsp)) {
		dev_err(&cldev->dev,
			"got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
		dev_err(&cldev->dev,
			"size is incorrect: %zd instead of %zu\n",
			len, sizeof(*rsp));
		return;
	}

	if (rsp->supported_version != SAP_VERSION) {
		dev_err(&cldev->dev,
			"didn't get the expected version: got %d\n",
			rsp->supported_version);
		return;
	}

	mutex_lock(&iwl_mei_mutex);
	set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	/*
	 * We'll receive AMT_STATE SAP message in a bit and
	 * that will continue the flow
	 */
	mutex_unlock(&iwl_mei_mutex);
}

static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
					const struct iwl_sap_csme_filters *filters)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
	struct iwl_mei_filters *new_filters;
	struct iwl_mei_filters *old_filters;

	old_filters =
		rcu_dereference_protected(mei->filters,
					  lockdep_is_held(&iwl_mei_mutex));

	new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
	if (!new_filters)
		return;

	/* Copy the OOB filters */
	new_filters->filters = filters->filters;

	rcu_assign_pointer(mei->filters, new_filters);

	if (old_filters)
		kfree_rcu(old_filters, rcu_head);
}

static void
iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
			   const struct iwl_sap_notif_conn_status *status)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_mei_conn_info conn_info = {
		.lp_state = le32_to_cpu(status->link_prot_state),
		.ssid_len = le32_to_cpu(status->conn_info.ssid_len),
		.channel = status->conn_info.channel,
		.band = status->conn_info.band,
		.auth_mode = le32_to_cpu(status->conn_info.auth_mode),
		.pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
	};

	if (!iwl_mei_cache.ops ||
	    conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
		return;

	memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
	ether_addr_copy(conn_info.bssid, status->conn_info.bssid);

	iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);

	mei->link_prot_state = status->link_prot_state;

	/*
	 * Update the Rfkill state in case the host does not own the device:
	 * if we are in Link Protection, ask to not touch the device, else,
	 * unblock rfkill.
	 * If the host owns the device, inform the user space whether it can
	 * roam.
	 */
	if (mei->got_ownership)
		iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
						     status->link_prot_state);
	else
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
					  status->link_prot_state, false);
}

static void iwl_mei_set_init_conf(struct iwl_mei *mei)
{
	struct iwl_sap_notif_host_link_up link_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
	};
	struct iwl_sap_notif_country_code mcc_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
		.mcc = cpu_to_le16(iwl_mei_cache.mcc),
	};
	struct iwl_sap_notif_sar_limits sar_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
	};
	struct iwl_sap_notif_host_nic_info nic_info_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
	};
	struct iwl_sap_msg_dw rfkill_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
		.val = cpu_to_le32(iwl_mei_cache.rf_kill),
	};

	/* wifi driver has registered already */
	if (iwl_mei_cache.ops) {
		iwl_mei_send_sap_msg(mei->cldev,
				     SAP_MSG_NOTIF_WIFIDR_UP);
		iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
	}

	iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);

	if (iwl_mei_cache.conn_info) {
		link_msg.conn_info = *iwl_mei_cache.conn_info;
		iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
	}

	iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);

	if (iwl_mei_cache.power_limit) {
		memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
		       sizeof(sar_msg.sar_chain_info_table));
		iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
	}

	if (is_valid_ether_addr(iwl_mei_cache.mac_address)) {
		ether_addr_copy(nic_info_msg.mac_address,
				iwl_mei_cache.mac_address);
		ether_addr_copy(nic_info_msg.nvm_address,
				iwl_mei_cache.nvm_address);
		iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
	}

	iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
}

static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mutex_lock(&iwl_mei_mutex);

	if (mei->amt_enabled == !!le32_to_cpu(dw->val))
		goto out;

	mei->amt_enabled = !!le32_to_cpu(dw->val);

	if (mei->amt_enabled)
		iwl_mei_set_init_conf(mei);
	else if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);

	schedule_work(&mei->netdev_work);

out:
	mutex_unlock(&iwl_mei_mutex);
}

static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
}

static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	/* We can get ownership and driver is registered, go ahead */
	if (iwl_mei_cache.ops)
		iwl_mei_send_sap_msg(cldev,
				     SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
}

static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	dev_info(&cldev->dev, "CSME takes ownership\n");

	mei->got_ownership = false;

	if (iwl_mei_cache.ops && !mei->device_down) {
		/*
		 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
		 * driver is finished taking the device down.
		 */
		mei->csme_taking_ownership = true;

		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
	} else {
		iwl_mei_send_sap_msg(cldev,
				     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
		schedule_delayed_work(&mei->ownership_dwork,
				      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
	}
}

static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
			       const struct iwl_sap_nvm *sap_nvm)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
	int i;

	kfree(mei->nvm);
	mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
	if (!mei->nvm)
		return;

	ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
	mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
	mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
	mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
	mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);

	for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
		mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);

	wake_up_all(&mei->get_nvm_wq);
}

static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
					   const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	/*
	 * This means that we can't use the wifi device right now, CSME is not
	 * ready to let us use it.
	 */
	if (!dw->val) {
		dev_info(&cldev->dev, "Ownership req denied\n");
		return;
	}

	mei->got_ownership = true;
	wake_up_all(&mei->get_ownership_wq);

	iwl_mei_send_sap_msg(cldev,
			     SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);

	/* We can now start the connection, unblock rfkill */
	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
}

static void iwl_mei_handle_pldr_ack(struct mei_cl_device *cldev,
				    const struct iwl_sap_pldr_ack_data *ack)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mei->pldr_active = le32_to_cpu(ack->status) == SAP_PLDR_STATUS_SUCCESS;
	wake_up_all(&mei->pldr_wq);
}

static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
				const struct iwl_sap_hdr *hdr)
{
	iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
}

static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
				   const struct iwl_sap_hdr *hdr)
{
	u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
	u16 type = le16_to_cpu(hdr->type);

	dev_dbg(&cldev->dev,
		"Got a new SAP message: type %d, len %d, seq %d\n",
		le16_to_cpu(hdr->type), len,
		le32_to_cpu(hdr->seq_num));

#define SAP_MSG_HANDLER(_cmd, _handler, _sz)				\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		mutex_lock(&iwl_mei_mutex);				\
		_handler(cldev, (const void *)hdr);			\
		mutex_unlock(&iwl_mei_mutex);				\
		break

#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz)			\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		_handler(cldev, (const void *)hdr);			\
		break

#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz)				\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		break

	switch (type) {
	SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
	SAP_MSG_HANDLER(CSME_FILTERS,
			iwl_mei_handle_csme_filters,
			sizeof(struct iwl_sap_csme_filters));
	SAP_MSG_HANDLER(CSME_CONN_STATUS,
			iwl_mei_handle_conn_status,
			sizeof(struct iwl_sap_notif_conn_status));
	SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
				iwl_mei_handle_amt_state,
				sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
	SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
			sizeof(struct iwl_sap_nvm));
	SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
			iwl_mei_handle_rx_host_own_req,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
			iwl_mei_handle_can_release_ownership, 0);
	SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
			iwl_mei_handle_csme_taking_ownership, 0);
	SAP_MSG_HANDLER(PLDR_ACK, iwl_mei_handle_pldr_ack,
			sizeof(struct iwl_sap_pldr_ack_data));
	default:
	/*
	 * This is not really an error: there are messages that we decided
	 * to ignore, yet it is useful to be able to leave a note if debug
	 * is enabled.
	 */
	dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
		le16_to_cpu(hdr->type), len);
	}

#undef SAP_MSG_HANDLER
#undef SAP_MSG_HANDLER_NO_LOCK
}

static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
				u32 *_rd, u32 wr,
				void *_buf, u32 len)
{
	u8 *buf = _buf;
	u32 rd = *_rd;

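	/* Read len bytes from the queue, handling a wrap past the end of the buffer */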
	if (rd + len <= q_sz) {
		memcpy(buf, q_head + rd, len);
		rd += len;
	} else {
		memcpy(buf, q_head + rd, q_sz - rd);
		memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
		rd = len - (q_sz - rd);
	}

	*_rd = rd;
}

#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) +      \
			     IEEE80211_TKIP_IV_LEN +                 \
			     sizeof(rfc1042_header) + ETH_TLEN)

static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
				    const u8 *q_head, u32 q_sz,
				    u32 rd, u32 wr, ssize_t valid_rx_sz,
				    struct sk_buff_head *tx_skbs)
{
	struct iwl_sap_hdr hdr;
	struct net_device *netdev =
		rcu_dereference_protected(iwl_mei_cache.netdev,
					  lockdep_is_held(&iwl_mei_mutex));

	if (!netdev)
		return;

	while (valid_rx_sz >= sizeof(hdr)) {
		struct ethhdr *ethhdr;
		unsigned char *data;
		struct sk_buff *skb;
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
		valid_rx_sz -= sizeof(hdr);
		len = le16_to_cpu(hdr.len);

		if (valid_rx_sz < len) {
			dev_err(&cldev->dev,
				"Data queue is corrupted: valid data len %zd, len %d\n",
				valid_rx_sz, len);
			break;
		}

		if (len < sizeof(*ethhdr)) {
			dev_err(&cldev->dev,
				"Data len is smaller than an ethernet header? len = %d\n",
				len);
		}

		valid_rx_sz -= len;

		if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
			dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
				le16_to_cpu(hdr.type), len);
			continue;
		}

		/* We need enough room for the WiFi header + SNAP + IV */
		skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
		if (!skb)
			continue;

		skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
		ethhdr = skb_push(skb, sizeof(*ethhdr));

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
				    ethhdr, sizeof(*ethhdr));
		len -= sizeof(*ethhdr);

		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = ethhdr->h_proto;

		data = skb_put(skb, len);
		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);

		/*
		 * Enqueue the skb here so that it can be sent later when we
		 * do not hold the mutex. TX'ing a packet with a mutex held is
		 * possible, but it wouldn't be nice to forbid the TX path to
		 * call any of iwlmei's functions, since every API from iwlmei
		 * needs the mutex.
		 */
		__skb_queue_tail(tx_skbs, skb);
	}
}

static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
				      const u8 *q_head, u32 q_sz,
				      u32 rd, u32 wr, ssize_t valid_rx_sz)
{
	struct page *p = alloc_page(GFP_KERNEL);
	struct iwl_sap_hdr *hdr;

	if (!p)
		return;

	hdr = page_address(p);

	while (valid_rx_sz >= sizeof(*hdr)) {
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
		valid_rx_sz -= sizeof(*hdr);
		len = le16_to_cpu(hdr->len);

		if (valid_rx_sz < len)
			break;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);

		trace_iwlmei_sap_cmd(hdr, false);
		iwl_mei_handle_sap_msg(cldev, hdr);
		valid_rx_sz -= len;
	}

	/* valid_rx_sz must be 0 now... */
	if (valid_rx_sz)
		dev_err(&cldev->dev,
			"More data in the buffer although we read it all\n");

	__free_page(p);
}

static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
				  struct iwl_sap_q_ctrl_blk *notif_q,
				  const u8 *q_head,
				  struct sk_buff_head *skbs,
				  u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	ssize_t valid_rx_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the buffer limit\n");
		return;
	}

	if (rd == wr)
		return;

	valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;

	if (skbs)
		iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
					valid_rx_sz, skbs);
	else
		iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
					  valid_rx_sz);

	/* Increment the read pointer to point to the write pointer */
	WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
}

static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct sk_buff_head tx_skbs;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;

	if (!mei->shared_mem.ctrl)
		return;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];

	/*
	 * Do not hold the mutex here, but rather each and every message
	 * handler takes it.
	 * This allows message handlers to take it at a certain time.
	 */
	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);

	mutex_lock(&iwl_mei_mutex);
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];

	__skb_queue_head_init(&tx_skbs);

	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);

	if (skb_queue_empty(&tx_skbs)) {
		mutex_unlock(&iwl_mei_mutex);
		return;
	}

	/*
	 * Take the RCU read lock before we unlock the mutex to make sure that
	 * even if the netdev is replaced by another non-NULL netdev right after
	 * we unlock the mutex, the old netdev will still be valid when we
	 * transmit the frames. We can't allow the netdev to be replaced here because
	 * the skbs hold a pointer to the netdev.
	 */
	rcu_read_lock();

	mutex_unlock(&iwl_mei_mutex);

	if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
		dev_err(&cldev->dev, "Can't Tx without a netdev\n");
		skb_queue_purge(&tx_skbs);
		goto out;
	}

	while (!skb_queue_empty(&tx_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&tx_skbs);

		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
		dev_queue_xmit(skb);
	}

out:
	rcu_read_unlock();
}

static void iwl_mei_rx(struct mei_cl_device *cldev)
{
	struct iwl_sap_me_msg_hdr *hdr;
	u8 msg[100];
	ssize_t ret;

	ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
		return;
	}

	if (ret == 0) {
		dev_err(&cldev->dev, "got an empty response\n");
		return;
	}

	hdr = (void *)msg;
	trace_iwlmei_me_msg(hdr, false);

	switch (le32_to_cpu(hdr->type)) {
	case SAP_ME_MSG_START_OK:
		BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
			     sizeof(msg));

		iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
		break;
	case SAP_ME_MSG_CHECK_SHARED_AREA:
		iwl_mei_handle_check_shared_area(cldev);
		break;
	default:
		dev_err(&cldev->dev, "got a RX notification: %d\n",
			le32_to_cpu(hdr->type));
		break;
	}
}

static int iwl_mei_send_start(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
		.hdr.len = cpu_to_le32(sizeof(msg)),
		.supported_versions[0] = SAP_VERSION,
		.init_data_seq_num = cpu_to_le16(0x100),
		.init_notif_seq_num = cpu_to_le16(0x800),
	};
	int ret;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_START message %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int iwl_mei_enable(struct mei_cl_device *cldev)
{
	int ret;

	ret = mei_cldev_enable(cldev);
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
		return ret;
	}

	ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
	if (ret) {
		dev_err(&cldev->dev,
			"failed to register to the rx cb: %d\n", ret);
		mei_cldev_disable(cldev);
		return ret;
	}

	return 0;
}

struct iwl_mei_nvm *iwl_mei_get_nvm(void)
{
	struct iwl_mei_nvm *nvm = NULL;
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
				   SAP_MSG_NOTIF_GET_NVM);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

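	/*
	 * The mutex is released while we wait: the NVM arrives in
	 * iwl_mei_handle_nvm(), which runs from the Rx path, takes the
	 * mutex itself and wakes get_nvm_wq.
	 */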
	ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
	if (!ret)
		return NULL;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (mei->nvm)
		nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);

out:
	mutex_unlock(&iwl_mei_mutex);
	return nvm;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);

#define IWL_MEI_PLDR_NUM_RETRIES	3

int iwl_mei_pldr_req(void)
{
	struct iwl_mei *mei;
	int ret;
	struct iwl_sap_pldr_data msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};
	int i;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

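	/*
	 * Send the PLDR request and wait for CSME to acknowledge it in
	 * iwl_mei_handle_pldr_ack(); retry up to IWL_MEI_PLDR_NUM_RETRIES
	 * times, waiting up to half a second each time.
	 */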
	for (i = 0; i < IWL_MEI_PLDR_NUM_RETRIES; i++) {
		ret = iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
		mutex_unlock(&iwl_mei_mutex);
		if (ret)
			return ret;

		ret = wait_event_timeout(mei->pldr_wq, mei->pldr_active, HZ / 2);
		if (ret)
			break;

		/* Take the mutex for the next iteration */
		mutex_lock(&iwl_mei_mutex);
	}

	if (ret)
		return 0;

	ret = -ETIMEDOUT;
out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_pldr_req);

int iwl_mei_get_ownership(void)
{
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

	if (mei->got_ownership) {
		ret = 0;
		goto out;
	}

	ret = iwl_mei_send_sap_msg(mei->cldev,
				   SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_ownership_wq,
				 mei->got_ownership, HZ / 2);
	if (!ret) {
		schedule_delayed_work(&mei->ownership_dwork,
				      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
		return -ETIMEDOUT;
	}

	return 0;
out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);

void iwl_mei_alive_notif(bool success)
{
	struct iwl_mei *mei;
	struct iwl_sap_pldr_end_data msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR_END),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.status = success ? cpu_to_le32(SAP_PLDR_STATUS_SUCCESS) :
			cpu_to_le32(SAP_PLDR_STATUS_FAILURE),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
	if (!mei || !mei->pldr_active)
		goto out;

	mei->pldr_active = false;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_alive_notif);

void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
			     const struct iwl_mei_colloc_info *colloc_info)
{
	struct iwl_sap_notif_host_link_up msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.conn_info = {
			.ssid_len = cpu_to_le32(conn_info->ssid_len),
			.channel = conn_info->channel,
			.band = conn_info->band,
			.pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
			.auth_mode = cpu_to_le32(conn_info->auth_mode),
		},
	};
	struct iwl_mei *mei;

	if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
		return;

	memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
	memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);

	if (colloc_info) {
		msg.colloc_channel = colloc_info->channel;
		msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
		memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
	}

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info =
		kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_associated);

void iwl_mei_host_disassociated(void)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_link_down msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.type = HOST_LINK_DOWN_TYPE_TEMPORARY,
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info = NULL;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);

void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
{
	struct iwl_mei *mei;
	u32 rfkill_state = 0;
	struct iwl_sap_msg_dw msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	if (!sw_rfkill)
		rfkill_state |= SAP_SW_RFKILL_DEASSERTED;

	if (!hw_rfkill)
		rfkill_state |= SAP_HW_RFKILL_DEASSERTED;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	msg.val = cpu_to_le32(rfkill_state);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.rf_kill = rfkill_state;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);

void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_nic_info msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	ether_addr_copy(msg.mac_address, mac_address);
	ether_addr_copy(msg.nvm_address, nvm_address);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
	ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);

void iwl_mei_set_country_code(u16 mcc)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_country_code msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.mcc = cpu_to_le16(mcc),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.mcc = mcc;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);

void iwl_mei_set_power_limit(const __le16 *power_limit)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_sar_limits msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.power_limit);
	iwl_mei_cache.power_limit = kmemdup(power_limit,
					    sizeof(msg.sar_chain_info_table), GFP_KERNEL);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
1701 
iwl_mei_set_netdev(struct net_device * netdev)1702 void iwl_mei_set_netdev(struct net_device *netdev)
1703 {
1704 	struct iwl_mei *mei;
1705 
1706 	mutex_lock(&iwl_mei_mutex);
1707 
1708 	if (!iwl_mei_is_connected()) {
1709 		rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1710 		goto out;
1711 	}
1712 
1713 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1714 
1715 	if (!mei)
1716 		goto out;
1717 
1718 	if (!netdev) {
1719 		struct net_device *dev =
1720 			rcu_dereference_protected(iwl_mei_cache.netdev,
1721 						  lockdep_is_held(&iwl_mei_mutex));
1722 
1723 		if (!dev)
1724 			goto out;
1725 
1726 		netdev_rx_handler_unregister(dev);
1727 	}
1728 
1729 	rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1730 
1731 	if (netdev && mei->amt_enabled)
1732 		netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
1733 
1734 out:
1735 	mutex_unlock(&iwl_mei_mutex);
1736 }
1737 EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
1738 
iwl_mei_device_state(bool up)1739 void iwl_mei_device_state(bool up)
1740 {
1741 	struct iwl_mei *mei;
1742 
1743 	mutex_lock(&iwl_mei_mutex);
1744 
1745 	if (!iwl_mei_is_connected())
1746 		goto out;
1747 
1748 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1749 
1750 	if (!mei)
1751 		goto out;
1752 
1753 	mei->device_down = !up;
1754 
1755 	if (up || !mei->csme_taking_ownership)
1756 		goto out;
1757 
1758 	iwl_mei_send_sap_msg(mei->cldev,
1759 			     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
1760 	mei->csme_taking_ownership = false;
1761 	schedule_delayed_work(&mei->ownership_dwork,
1762 			      MEI_OWNERSHIP_RETAKE_TIMEOUT_MS);
1763 out:
1764 	mutex_unlock(&iwl_mei_mutex);
1765 }
1766 EXPORT_SYMBOL_GPL(iwl_mei_device_state);
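
/*
 * Ownership handover, roughly: when CSME asks for the device while the
 * host interface is still up, the request is only recorded in
 * csme_taking_ownership. The confirmation is sent here, once iwlwifi
 * reports the device down, and ownership_dwork is armed so that
 * iwl_mei_get_ownership() tries to take the device back after
 * MEI_OWNERSHIP_RETAKE_TIMEOUT_MS.
 */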
1767 
1768 int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
1769 {
1770 	int ret;
1771 
1772 	/*
1773 	 * We must have a non-NULL priv pointer to avoid crashing when there
1774 	 * are multiple WiFi devices.
1775 	 */
1776 	if (!priv)
1777 		return -EINVAL;
1778 
1779 	mutex_lock(&iwl_mei_mutex);
1780 
1781 	/* do not allow registration if someone else already registered */
1782 	if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
1783 		ret = -EBUSY;
1784 		goto out;
1785 	}
1786 
1787 	iwl_mei_cache.priv = priv;
1788 	iwl_mei_cache.ops = ops;
1789 
1790 	if (iwl_mei_global_cldev) {
1791 		struct iwl_mei *mei =
1792 			mei_cldev_get_drvdata(iwl_mei_global_cldev);
1793 
1794 		/* we already have a SAP connection */
1795 		if (iwl_mei_is_connected()) {
1796 			if (mei->amt_enabled)
1797 				iwl_mei_send_sap_msg(mei->cldev,
1798 						     SAP_MSG_NOTIF_WIFIDR_UP);
1799 			ops->rfkill(priv, mei->link_prot_state, false);
1800 		}
1801 	}
1802 	ret = 0;
1803 
1804 out:
1805 	mutex_unlock(&iwl_mei_mutex);
1806 	return ret;
1807 }
1808 EXPORT_SYMBOL_GPL(iwl_mei_register);
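
/*
 * A minimal, hypothetical registration from the wifi driver's side (only
 * the two callbacks used in this file are shown; see iwl-mei.h for the
 * full ops structure):
 *
 *	static const struct iwl_mei_ops my_ops = {
 *		.rfkill		= my_rfkill_cb,
 *		.nic_stolen	= my_nic_stolen_cb,
 *	};
 *
 *	err = iwl_mei_register(my_priv, &my_ops);
 *
 * Only one caller can be registered at a time: a second registration
 * fails with -EBUSY until the unregister sequence below has completed.
 */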
1809 
1810 void iwl_mei_start_unregister(void)
1811 {
1812 	mutex_lock(&iwl_mei_mutex);
1813 
1814 	/* At this point, the wifi driver should have removed the netdev */
1815 	if (rcu_access_pointer(iwl_mei_cache.netdev))
1816 		pr_err("Still had a netdev pointer set upon unregister\n");
1817 
1818 	kfree(iwl_mei_cache.conn_info);
1819 	iwl_mei_cache.conn_info = NULL;
1820 	kfree(iwl_mei_cache.power_limit);
1821 	iwl_mei_cache.power_limit = NULL;
1822 	iwl_mei_cache.ops = NULL;
1823 	/* leave iwl_mei_cache.priv non-NULL to prevent any new registration */
1824 
1825 	mutex_unlock(&iwl_mei_mutex);
1826 }
1827 EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
1828 
1829 void iwl_mei_unregister_complete(void)
1830 {
1831 	mutex_lock(&iwl_mei_mutex);
1832 
1833 	iwl_mei_cache.priv = NULL;
1834 
1835 	if (iwl_mei_global_cldev) {
1836 		struct iwl_mei *mei =
1837 			mei_cldev_get_drvdata(iwl_mei_global_cldev);
1838 
1839 		if (mei->amt_enabled)
1840 			iwl_mei_send_sap_msg(mei->cldev,
1841 					     SAP_MSG_NOTIF_WIFIDR_DOWN);
1842 		mei->got_ownership = false;
1843 	}
1844 
1845 	mutex_unlock(&iwl_mei_mutex);
1846 }
1847 EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
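
/*
 * Unregistration is deliberately two-phased: iwl_mei_start_unregister()
 * drops the ops (so no new callbacks are issued) while leaving priv set to
 * block any new registration, and iwl_mei_unregister_complete() finally
 * clears priv and tells CSME that the wifi driver is gone. A hypothetical
 * teardown in the wifi driver would therefore look like:
 *
 *	iwl_mei_start_unregister();
 *	... tear down anything that could call back into iwlmei ...
 *	iwl_mei_unregister_complete();
 */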
1848 
1849 #if IS_ENABLED(CONFIG_DEBUG_FS)
1850 
1851 static ssize_t
1852 iwl_mei_dbgfs_send_start_message_write(struct file *file,
1853 				       const char __user *user_buf,
1854 				       size_t count, loff_t *ppos)
1855 {
1856 	int ret;
1857 
1858 	mutex_lock(&iwl_mei_mutex);
1859 
1860 	if (!iwl_mei_global_cldev) {
1861 		ret = -ENODEV;
1862 		goto out;
1863 	}
1864 
1865 	ret = iwl_mei_send_start(iwl_mei_global_cldev);
1866 
1867 out:
1868 	mutex_unlock(&iwl_mei_mutex);
1869 	return ret ?: count;
1870 }
1871 
1872 static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
1873 	.write = iwl_mei_dbgfs_send_start_message_write,
1874 	.open = simple_open,
1875 	.llseek = default_llseek,
1876 };
1877 
1878 static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
1879 						 const char __user *user_buf,
1880 						 size_t count, loff_t *ppos)
1881 {
1882 	iwl_mei_get_ownership();
1883 
1884 	return count;
1885 }
1886 
1887 static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
1888 	.write = iwl_mei_dbgfs_req_ownership_write,
1889 	.open = simple_open,
1890 	.llseek = default_llseek,
1891 };
1892 
1893 static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
1894 {
1895 	mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1896 
1897 	if (!mei->dbgfs_dir)
1898 		return;
1899 
1900 	debugfs_create_ulong("status", S_IRUSR,
1901 			     mei->dbgfs_dir, &iwl_mei_status);
1902 	debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
1903 			    mei, &iwl_mei_dbgfs_send_start_message_ops);
1904 	debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
1905 			    mei, &iwl_mei_dbgfs_req_ownership_ops);
1906 }
1907 
1908 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
1909 {
1910 	debugfs_remove_recursive(mei->dbgfs_dir);
1911 	mei->dbgfs_dir = NULL;
1912 }
1913 
1914 #else
1915 
1916 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
1917 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
1918 
1919 #endif /* CONFIG_DEBUG_FS */
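
/*
 * With CONFIG_DEBUG_FS enabled, both hooks above are reachable from user
 * space. Assuming debugfs is mounted at its usual location and the
 * directory is named after KBUILD_MODNAME (an assumption, the exact path
 * isn't spelled out here), something like:
 *
 *	echo 1 > /sys/kernel/debug/iwlmei/send_start_message
 *	echo 1 > /sys/kernel/debug/iwlmei/req_ownership
 *
 * restarts the SAP handshake or requests ownership, respectively.
 */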
1920 
1921 static void iwl_mei_ownership_dwork(struct work_struct *wk)
1922 {
1923 	iwl_mei_get_ownership();
1924 }
1925 
1926 #define ALLOC_SHARED_MEM_RETRY_MAX_NUM	3
1927 
1928 /*
1929  * iwl_mei_probe - the probe function called by the mei bus enumeration
1930  *
1931  * This allocates the data needed by iwlmei and stores a pointer to this
1932  * data in the mei_cl_device's drvdata.
1933  * It starts the SAP protocol by sending the SAP_ME_MSG_START without
1934  * waiting for the answer. The answer will be caught later by the Rx callback.
1935  */
1936 static int iwl_mei_probe(struct mei_cl_device *cldev,
1937 			 const struct mei_cl_device_id *id)
1938 {
1939 	int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
1940 	struct iwl_mei *mei;
1941 	int ret;
1942 
1943 	mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
1944 	if (!mei)
1945 		return -ENOMEM;
1946 
1947 	init_waitqueue_head(&mei->get_nvm_wq);
1948 	INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
1949 	INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
1950 			  iwl_mei_csa_throttle_end_wk);
1951 	init_waitqueue_head(&mei->get_ownership_wq);
1952 	init_waitqueue_head(&mei->pldr_wq);
1953 	spin_lock_init(&mei->data_q_lock);
1954 	INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);
1955 	INIT_DELAYED_WORK(&mei->ownership_dwork, iwl_mei_ownership_dwork);
1956 
1957 	mei_cldev_set_drvdata(cldev, mei);
1958 	mei->cldev = cldev;
1959 	mei->device_down = true;
1960 
1961 	do {
1962 		ret = iwl_mei_alloc_shared_mem(cldev);
1963 		if (!ret)
1964 			break;
1965 		/*
1966 		 * The CSME firmware needs to boot the internal WLAN client.
1967 		 * This can take time in certain configurations (usually
1968 		 * upon resume and when the whole CSME firmware is shut down
1969 		 * during suspend).
1970 		 *
1971 		 * Wait a bit before retrying and hope we'll succeed next time.
1972 		 */
1973 
1974 		dev_dbg(&cldev->dev,
1975 			"Couldn't allocate the shared memory: %d, attempt %d / %d\n",
1976 			ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
1977 		msleep(100);
1978 		alloc_retry--;
1979 	} while (alloc_retry);
1980 
1981 	if (ret) {
1982 		dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
1983 			ret);
1984 		goto free;
1985 	}
1986 
1987 	iwl_mei_init_shared_mem(mei);
1988 
1989 	ret = iwl_mei_enable(cldev);
1990 	if (ret)
1991 		goto free_shared_mem;
1992 
1993 	iwl_mei_dbgfs_register(mei);
1994 
1995 	/*
1996 	 * We now have an Rx function in place; start the SAP protocol.
1997 	 * We expect to get the SAP_ME_MSG_START_OK response later on.
1998 	 */
1999 	mutex_lock(&iwl_mei_mutex);
2000 	ret = iwl_mei_send_start(cldev);
2001 	mutex_unlock(&iwl_mei_mutex);
2002 	if (ret)
2003 		goto debugfs_unregister;
2004 
2005 	/* must be last: this makes the device visible to the exported API */
2006 	iwl_mei_global_cldev = cldev;
2007 
2008 	return 0;
2009 
2010 debugfs_unregister:
2011 	iwl_mei_dbgfs_unregister(mei);
2012 	mei_cldev_disable(cldev);
2013 free_shared_mem:
2014 	iwl_mei_free_shared_mem(cldev);
2015 free:
2016 	mei_cldev_set_drvdata(cldev, NULL);
2017 	devm_kfree(&cldev->dev, mei);
2018 
2019 	return ret;
2020 }
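
/*
 * The error unwinding above mirrors the setup order in reverse: the
 * debugfs entries are removed first, then the MEI client enabled by
 * iwl_mei_enable() is disabled, then the shared memory is freed, and
 * finally the drvdata pointer and the allocation itself are released.
 */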
2021 
2022 #define SEND_SAP_MAX_WAIT_ITERATION 10
2023 #define IWLMEI_DEVICE_DOWN_WAIT_ITERATION 50
2024 
2025 static void iwl_mei_remove(struct mei_cl_device *cldev)
2026 {
2027 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
2028 	int i;
2029 
2030 	/*
2031 	 * We are being removed while the bus is still active, which means we
2032 	 * are about to suspend or shut down, so the NIC will disappear.
2033 	 */
2034 	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) {
2035 		unsigned int iter = IWLMEI_DEVICE_DOWN_WAIT_ITERATION;
2036 		bool down = false;
2037 
2038 		/*
2039 	 * In case of suspend, wait for the MAC to stop and don't remove
2040 	 * the interface. This allows the interface to come back on
2041 	 * resume.
2042 		 */
2043 		while (!down && iter--) {
2044 			mdelay(1);
2045 
2046 			mutex_lock(&iwl_mei_mutex);
2047 			down = mei->device_down;
2048 			mutex_unlock(&iwl_mei_mutex);
2049 		}
2050 
2051 		if (!down)
2052 			iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
2053 	}
2054 
2055 	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
2056 		struct net_device *dev;
2057 
2058 		/*
2059 		 * Take the RTNL first and only then the mutex, to avoid an ABBA
2060 		 * deadlock with iwl_mei_set_netdev().
2061 		 */
2062 		rtnl_lock();
2063 		mutex_lock(&iwl_mei_mutex);
2064 
2065 		/*
2066 		 * If we are suspending and the wifi driver hasn't removed its netdev
2067 		 * yet, do it now. In any case, don't change the cache.netdev pointer.
2068 		 */
2069 		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
2070 						lockdep_is_held(&iwl_mei_mutex));
2071 
2072 		netdev_rx_handler_unregister(dev);
2073 		mutex_unlock(&iwl_mei_mutex);
2074 		rtnl_unlock();
2075 	}
2076 
2077 	mutex_lock(&iwl_mei_mutex);
2078 
2079 	/* Tell CSME that we are going down so that it won't access the
2080 	 * memory anymore; make sure this message goes through immediately.
2081 	 */
2082 	mei->csa_throttled = false;
2083 	iwl_mei_send_sap_msg(mei->cldev,
2084 			     SAP_MSG_NOTIF_HOST_GOES_DOWN);
2085 
2086 	for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
2087 		if (!iwl_mei_host_to_me_data_pending(mei))
2088 			break;
2089 
2090 		msleep(20);
2091 	}
2092 
2093 	/* If we couldn't make sure that CSME saw the HOST_GOES_DOWN
2094 	 * message, it means that it will probably keep reading memory
2095 	 * that we are going to unmap and free; expect IOMMU error
2096 	 * messages in that case.
2097 	 */
2098 	if (i == SEND_SAP_MAX_WAIT_ITERATION)
2099 		dev_err(&mei->cldev->dev,
2100 			"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
2101 
2102 	mutex_unlock(&iwl_mei_mutex);
2103 
2104 	/*
2105 	 * This looks strange, but this lock is taken here to make sure that
2106 	 * iwl_mei_add_data_to_ring, called from the Tx path, sees that we
2107 	 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
2108 	 * Rx isn't a problem because the rx_handler can't be called after
2109 	 * it has been unregistered.
2110 	 */
2111 	spin_lock_bh(&mei->data_q_lock);
2112 	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
2113 	spin_unlock_bh(&mei->data_q_lock);
2114 
2115 	if (iwl_mei_cache.ops)
2116 		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
2117 
2118 	/*
2119 	 * mei_cldev_disable will return only after all the MEI Rx is done.
2120 	 * It must be called when iwl_mei_mutex is *not* held, since it waits
2121 	 * for our Rx handler to complete.
2122 	 * After it returns, no new Rx will start.
2123 	 */
2124 	mei_cldev_disable(cldev);
2125 
2126 	/*
2127 	 * The netdev was already removed, and its removal includes a call to
2128 	 * synchronize_net(), so we know there won't be any new Rx that could
2129 	 * trigger the following workers.
2130 	 */
2131 	cancel_work_sync(&mei->send_csa_msg_wk);
2132 	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
2133 	cancel_work_sync(&mei->netdev_work);
2134 	cancel_delayed_work_sync(&mei->ownership_dwork);
2135 
2136 	/*
2137 	 * If someone is waiting for the ownership, let them know that we are
2138 	 * going down and not connected anymore, so they will be able to take
2139 	 * the device.
2140 	 */
2141 	wake_up_all(&mei->get_ownership_wq);
2142 	wake_up_all(&mei->pldr_wq);
2143 
2144 	mutex_lock(&iwl_mei_mutex);
2145 
2146 	iwl_mei_global_cldev = NULL;
2147 
2148 	wake_up_all(&mei->get_nvm_wq);
2149 
2150 	iwl_mei_free_shared_mem(cldev);
2151 
2152 	iwl_mei_dbgfs_unregister(mei);
2153 
2154 	mei_cldev_set_drvdata(cldev, NULL);
2155 
2156 	kfree(mei->nvm);
2157 
2158 	kfree(rcu_access_pointer(mei->filters));
2159 
2160 	devm_kfree(&cldev->dev, mei);
2161 
2162 	mutex_unlock(&iwl_mei_mutex);
2163 }
2164 
2165 static const struct mei_cl_device_id iwl_mei_tbl[] = {
2166 	{
2167 		.name = KBUILD_MODNAME,
2168 		.uuid = MEI_WLAN_UUID,
2169 		.version = MEI_CL_VERSION_ANY,
2170 	},
2171 
2172 	/* required last entry */
2173 	{ }
2174 };
2175 
2176 /*
2177  * Do not export the device table: this module is loaded as a dependency
2178  * of iwlwifi rather than autoloaded by the MEI bus.
2179  */
2180 
2181 static struct mei_cl_driver iwl_mei_cl_driver = {
2182 	.id_table = iwl_mei_tbl,
2183 	.name = KBUILD_MODNAME,
2184 	.probe = iwl_mei_probe,
2185 	.remove = iwl_mei_remove,
2186 };
2187 
2188 module_mei_cl_driver(iwl_mei_cl_driver);
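
/*
 * module_mei_cl_driver() expands to the usual module_driver() boilerplate:
 * it registers iwl_mei_cl_driver on the MEI client bus at module init and
 * unregisters it on exit, so devices matching MEI_WLAN_UUID in iwl_mei_tbl
 * are bound through iwl_mei_probe() above.
 */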
2189