xref: /freebsd/sys/contrib/dev/athk/ath11k/ahb.c (revision 315ee00f)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/platform_device.h>
9 #include <linux/of_device.h>
10 #include <linux/of.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/of_address.h>
13 #include <linux/iommu.h>
14 #include "ahb.h"
15 #include "debug.h"
16 #include "hif.h"
17 #include "qmi.h"
18 #include <linux/remoteproc.h>
19 #include "pcic.h"
20 #include <linux/soc/qcom/smem.h>
21 #include <linux/soc/qcom/smem_state.h>
22 
/* Device-tree match table: maps each supported SoC Wi-Fi compatible
 * string to its ath11k hardware revision (carried in .data and read
 * back in ath11k_ahb_probe()).
 */
static const struct of_device_id ath11k_ahb_of_match[] = {
	/* TODO: Should we change the compatible string to something similar
	 * to one that ath10k uses?
	 */
	{ .compatible = "qcom,ipq8074-wifi",
	  .data = (void *)ATH11K_HW_IPQ8074,
	},
	{ .compatible = "qcom,ipq6018-wifi",
	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
	},
	{ .compatible = "qcom,wcn6750-wifi",
	  .data = (void *)ATH11K_HW_WCN6750_HW10,
	},
	{ .compatible = "qcom,ipq5018-wifi",
	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
	},
	{ }
};
41 
MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);

/* Index of "ce0" in irq_name[]; entries 0-3 are the misc interrupts. */
#define ATH11K_IRQ_CE0_OFFSET 4
45 
/* Platform interrupt resource names, indexed by ath11k IRQ number and
 * looked up with platform_get_irq_byname().  Entries from index 16 on
 * must stay in the same order as enum ext_irq_num, whose values are
 * used directly as indices into this table.
 */
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
100 
/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath
 *
 * Values start at 16 so each enumerator also indexes the matching
 * entry of irq_name[] (16 is "host2wbm-desc-feed", and so on in
 * declaration order).
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
142 
/* Return the Linux IRQ number backing MSI @vector (WCN6750 hybrid bus);
 * irqs[] is filled in ath11k_ahb_setup_msi_resources().
 */
static int
ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.msi.irqs[vector];
}
148 
149 static inline u32
150 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
151 {
152 	u32 window_start = 0;
153 
154 	/* If offset lies within DP register range, use 1st window */
155 	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
156 		window_start = ATH11K_PCI_WINDOW_START;
157 	/* If offset lies within CE register range, use 2nd window */
158 	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
159 		 ATH11K_PCI_WINDOW_RANGE_MASK)
160 		window_start = 2 * ATH11K_PCI_WINDOW_START;
161 
162 	return window_start;
163 }
164 
165 static void
166 ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
167 {
168 	u32 window_start;
169 
170 	/* WCN6750 uses static window based register access*/
171 	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
172 
173 	iowrite32(value, ab->mem + window_start +
174 		  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
175 }
176 
177 static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
178 {
179 	u32 window_start;
180 	u32 val;
181 
182 	/* WCN6750 uses static window based register access */
183 	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
184 
185 	val = ioread32(ab->mem + window_start +
186 		       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
187 	return val;
188 }
189 
/* Register-access/MSI ops handed to the common PCIc layer for the
 * hybrid-bus WCN6750; .wakeup/.release are intentionally NULL.
 */
static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
	.wakeup = NULL,
	.release = NULL,
	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
	.window_write32 = ath11k_ahb_window_write32_wcn6750,
	.window_read32 = ath11k_ahb_window_read32_wcn6750,
};
197 
/* Direct (non-windowed) MMIO read at @offset from the AHB mapping. */
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
	return ioread32(ab->mem + offset);
}
202 
/* Direct (non-windowed) MMIO write of @value at @offset. */
static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	iowrite32(value, ab->mem + offset);
}
207 
208 static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
209 {
210 	int i;
211 
212 	for (i = 0; i < ab->hw_params.ce_count; i++) {
213 		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
214 
215 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
216 			continue;
217 
218 		tasklet_kill(&ce_pipe->intr_tq);
219 	}
220 }
221 
222 static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
223 {
224 	int i;
225 
226 	for (i = 0; i < irq_grp->num_irq; i++)
227 		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
228 }
229 
/* Disable all DP (external) group interrupts and stop their NAPI
 * instances.  The IRQs are masked first so no new poll can be
 * scheduled while NAPI is synchronized and disabled.
 */
static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_ahb_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}
246 
247 static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
248 {
249 	int i;
250 
251 	for (i = 0; i < irq_grp->num_irq; i++)
252 		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
253 }
254 
255 static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
256 {
257 	u32 val;
258 
259 	val = ath11k_ahb_read32(ab, offset);
260 	ath11k_ahb_write32(ab, offset, val | BIT(bit));
261 }
262 
263 static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
264 {
265 	u32 val;
266 
267 	val = ath11k_ahb_read32(ab, offset);
268 	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
269 }
270 
/* Enable the copy-engine interrupts for pipe @ce_id: the pipe's bit is
 * set in the IE1 register when the pipe has a source ring, and in the
 * IE2 plus IE3 registers (the latter at ce_id + CE_HOST_IE_3_SHIFT)
 * when it has a destination ring.  Register addresses come from the
 * hw_params ce_ie_addr table plus the chip's CE offset.
 */
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie3_reg_addr);
	}
}
291 
/* Disable the copy-engine interrupts for pipe @ce_id; exact mirror of
 * ath11k_ahb_ce_irq_enable() with the same IE1/IE2/IE3 bit layout,
 * clearing bits instead of setting them.
 */
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie3_reg_addr);
	}
}
312 
313 static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
314 {
315 	int i;
316 	int irq_idx;
317 
318 	for (i = 0; i < ab->hw_params.ce_count; i++) {
319 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
320 			continue;
321 
322 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
323 		synchronize_irq(ab->irq_num[irq_idx]);
324 	}
325 }
326 
327 static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
328 {
329 	int i, j;
330 	int irq_idx;
331 
332 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
333 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
334 
335 		for (j = 0; j < irq_grp->num_irq; j++) {
336 			irq_idx = irq_grp->irqs[j];
337 			synchronize_irq(ab->irq_num[irq_idx]);
338 		}
339 	}
340 }
341 
342 static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
343 {
344 	int i;
345 
346 	for (i = 0; i < ab->hw_params.ce_count; i++) {
347 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
348 			continue;
349 		ath11k_ahb_ce_irq_enable(ab, i);
350 	}
351 }
352 
353 static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
354 {
355 	int i;
356 
357 	for (i = 0; i < ab->hw_params.ce_count; i++) {
358 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
359 			continue;
360 		ath11k_ahb_ce_irq_disable(ab, i);
361 	}
362 }
363 
/* hif .start: enable CE interrupts and post the initial RX buffers. */
static int ath11k_ahb_start(struct ath11k_base *ab)
{
	ath11k_ahb_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
371 
/* hif .irq_enable: start NAPI for each DP group before unmasking its
 * IRQs, so a poll scheduled by the first interrupt finds NAPI ready.
 */
static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_ahb_ext_grp_enable(irq_grp);
	}
}
386 
/* hif .irq_disable: mask DP IRQs and stop NAPI, then wait for any
 * handlers still running to complete.
 */
static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_ahb_ext_irq_disable(ab);
	ath11k_ahb_sync_ext_irqs(ab);
}
392 
/* hif .stop: quiesce CE processing.  CE interrupts are disabled unless
 * a crash flush is in progress (ATH11K_FLAG_CRASH_FLUSH); then running
 * handlers are drained, tasklets killed, the RX replenish retry timer
 * cancelled and the CE pipes cleaned up.
 */
static void ath11k_ahb_stop(struct ath11k_base *ab)
{
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath11k_ahb_ce_irqs_disable(ab);
	ath11k_ahb_sync_ce_irqs(ab);
	ath11k_ahb_kill_tasklets(ab);
	del_timer_sync(&ab->rx_replenish_retry);
	ath11k_ce_cleanup_pipes(ab);
}
402 
403 static int ath11k_ahb_power_up(struct ath11k_base *ab)
404 {
405 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
406 	int ret;
407 
408 	ret = rproc_boot(ab_ahb->tgt_rproc);
409 	if (ret)
410 		ath11k_err(ab, "failed to boot the remote processor Q6\n");
411 
412 	return ret;
413 }
414 
/* hif .power_down: shut down the Q6 remote processor. */
static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	rproc_shutdown(ab_ahb->tgt_rproc);
}
421 
/* Populate the QMI CE configuration (target CE config, service-to-CE
 * map and QMI service instance id) from this chip's hw_params.
 */
static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
}
432 
433 static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
434 {
435 	int i, j;
436 
437 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
438 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
439 
440 		for (j = 0; j < irq_grp->num_irq; j++)
441 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
442 
443 		netif_napi_del(&irq_grp->napi);
444 	}
445 }
446 
447 static void ath11k_ahb_free_irq(struct ath11k_base *ab)
448 {
449 	int irq_idx;
450 	int i;
451 
452 	if (ab->hw_params.hybrid_bus_type)
453 		return ath11k_pcic_free_irq(ab);
454 
455 	for (i = 0; i < ab->hw_params.ce_count; i++) {
456 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
457 			continue;
458 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
459 		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
460 	}
461 
462 	ath11k_ahb_free_ext_irq(ab);
463 }
464 
/* CE completion tasklet: service the pipe's rings, then re-enable its
 * interrupts (they were disabled in the hard-IRQ handler).
 */
static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}
473 
/* Hard-IRQ handler for a CE pipe: record the time, mask the pipe's
 * interrupts and defer the actual servicing to the tasklet.
 */
static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}
487 
/* NAPI poll for a DP interrupt group: service the group's SRNG rings
 * for up to @budget units of work.  If less than the budget was used,
 * polling is complete and the group IRQs (masked in the hard-IRQ
 * handler) are re-enabled.  The return value is clamped because NAPI
 * requires poll() to never report more than @budget.
 */
static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath11k_ext_irq_grp,
						napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath11k_ahb_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}
507 
/* Hard-IRQ handler for a DP group: record the time, mask the group's
 * IRQs and hand processing over to NAPI.
 */
static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath11k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
521 
/* Build and request the DP (external) interrupts for every group.
 *
 * Each group's irqs[] list is derived from the hw_params ring masks:
 * every set bit maps to an ext_irq_num value.  Ring-N entries count
 * down from the ring-1 enum value; per-MAC entries count down from the
 * mac1 value using ath11k_hw_get_mac_from_pdev_id().  The IRQs are
 * then looked up by name and requested with NAPI already initialized.
 *
 * NOTE(review): a failing request_irq() is only logged and the
 * function still returns 0 — confirm this best-effort behavior is
 * intentional before changing it.
 */
static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
	struct ath11k_hw_params *hw = &ab->hw_params;
	int i, j;
	int irq;
	int ret;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		/* NAPI needs a net_device to attach to; use a dummy one */
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_ahb_ext_grp_napi_poll);

		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					wbm2host_tx_completions_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					reo2host_destination_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_exception;

			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
				irq_grp->irqs[num_irq++] = wbm2host_rx_release;

			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_status;

			/* Per-MAC rings only exist for valid pdev ids */
			if (j < ab->hw_params.max_radios) {
				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						rxdma2host_destination_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						host2rxdma_host_buf_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						ppdu_end_interrupts_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
					irq_grp->irqs[num_irq++] =
						rxdma2host_monitor_status_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}
			}
		}
		irq_grp->num_irq = num_irq;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];

			irq = platform_get_irq_byname(ab->pdev,
						      irq_name[irq_idx]);
			ab->irq_num[irq_idx] = irq;
			/* Keep the IRQ disabled until ext_irq_enable() */
			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
					  IRQF_TRIGGER_RISING,
					  irq_name[irq_idx], irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request_irq for %d\n",
					   irq);
			}
		}
	}

	return 0;
}
603 
/* Request all interrupts: CE IRQs first (hybrid-bus devices delegate
 * everything to the PCIc layer), then the DP (external) IRQs.
 * Returns 0 on success or the first request_irq() error.
 *
 * NOTE(review): on a mid-loop request_irq() failure, IRQs already
 * requested are not freed here — verify the caller's error path
 * releases them.
 */
static int ath11k_ahb_config_irq(struct ath11k_base *ab)
{
	int irq, irq_idx, i;
	int ret;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_pcic_config_irq(ab);

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
				  ce_pipe);
		if (ret)
			return ret;

		ab->irq_num[irq_idx] = irq;
	}

	/* Configure external interrupts */
	ret = ath11k_ahb_config_ext_irq(ab);

	return ret;
}
637 
638 static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
639 					  u8 *ul_pipe, u8 *dl_pipe)
640 {
641 	const struct service_to_pipe *entry;
642 	bool ul_set = false, dl_set = false;
643 	int i;
644 
645 	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
646 		entry = &ab->hw_params.svc_to_ce_map[i];
647 
648 		if (__le32_to_cpu(entry->service_id) != service_id)
649 			continue;
650 
651 		switch (__le32_to_cpu(entry->pipedir)) {
652 		case PIPEDIR_NONE:
653 			break;
654 		case PIPEDIR_IN:
655 			WARN_ON(dl_set);
656 			*dl_pipe = __le32_to_cpu(entry->pipenum);
657 			dl_set = true;
658 			break;
659 		case PIPEDIR_OUT:
660 			WARN_ON(ul_set);
661 			*ul_pipe = __le32_to_cpu(entry->pipenum);
662 			ul_set = true;
663 			break;
664 		case PIPEDIR_INOUT:
665 			WARN_ON(dl_set);
666 			WARN_ON(ul_set);
667 			*dl_pipe = __le32_to_cpu(entry->pipenum);
668 			*ul_pipe = __le32_to_cpu(entry->pipenum);
669 			dl_set = true;
670 			ul_set = true;
671 			break;
672 		}
673 	}
674 
675 	if (WARN_ON(!ul_set || !dl_set))
676 		return -ENOENT;
677 
678 	return 0;
679 }
680 
/* hif .suspend (WCN6750): arm the CE wake IRQ as a system wakeup
 * source and tell the firmware, via the smp2p smem state, to enter
 * power save.  Each smp2p message carries an incrementing sequence
 * number alongside the command.
 *
 * Returns 0 on success, -EPERM when the device may not wake the
 * system, or a negative errno from the IRQ/smem calls.
 */
static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	u32 wake_irq;
	u32 value = 0;
	int ret;

	if (!device_may_wakeup(ab->dev))
		return -EPERM;

	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];

	ret = enable_irq_wake(wake_irq);
	if (ret) {
		ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
		return ret;
	}

	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
				 ATH11K_AHB_SMP2P_SMEM_MSG);

	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
	if (ret) {
		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
		return ret;
	}

	ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");

	return ret;
}
715 
716 static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
717 {
718 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
719 	u32 wake_irq;
720 	u32 value = 0;
721 	int ret;
722 
723 	if (!device_may_wakeup(ab->dev))
724 		return -EPERM;
725 
726 	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
727 
728 	ret = disable_irq_wake(wake_irq);
729 	if (ret) {
730 		ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
731 		return ret;
732 	}
733 
734 	reinit_completion(&ab->wow.wakeup_completed);
735 
736 	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
737 				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
738 	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
739 				 ATH11K_AHB_SMP2P_SMEM_MSG);
740 
741 	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
742 					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
743 	if (ret) {
744 		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
745 		return ret;
746 	}
747 
748 	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
749 	if (ret == 0) {
750 		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
751 		return -ETIMEDOUT;
752 	}
753 
754 	ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
755 
756 	return 0;
757 }
758 
/* hif ops for pure-AHB chips (IPQ8074/IPQ6018/IPQ5018): direct MMIO
 * access and local interrupt/power handling.
 */
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
	.start = ath11k_ahb_start,
	.stop = ath11k_ahb_stop,
	.read32 = ath11k_ahb_read32,
	.write32 = ath11k_ahb_write32,
	.read = NULL,
	.irq_enable = ath11k_ahb_ext_irq_enable,
	.irq_disable = ath11k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
};
771 
/* hif ops for the hybrid-bus WCN6750: register access and interrupts
 * go through the common PCIc layer, while power and suspend/resume
 * stay AHB-specific (remoteproc + smp2p).
 */
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
	.start = ath11k_pcic_start,
	.stop = ath11k_pcic_stop,
	.read32 = ath11k_pcic_read32,
	.write32 = ath11k_pcic_write32,
	.read = NULL,
	.irq_enable = ath11k_pcic_ext_irq_enable,
	.irq_disable = ath11k_pcic_ext_irq_disable,
	.get_msi_address =  ath11k_pcic_get_msi_address,
	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
	.suspend = ath11k_ahb_hif_suspend,
	.resume = ath11k_ahb_hif_resume,
	.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
	.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};
790 
791 static int ath11k_core_get_rproc(struct ath11k_base *ab)
792 {
793 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
794 	struct device *dev = ab->dev;
795 	struct rproc *prproc;
796 	phandle rproc_phandle;
797 
798 	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
799 		ath11k_err(ab, "failed to get q6_rproc handle\n");
800 		return -ENOENT;
801 	}
802 
803 	prproc = rproc_get_by_phandle(rproc_phandle);
804 	if (!prproc) {
805 		ath11k_err(ab, "failed to get rproc\n");
806 		return -EINVAL;
807 	}
808 	ab_ahb->tgt_rproc = prproc;
809 
810 	return 0;
811 }
812 
/* Set up the fake-MSI plumbing for the hybrid-bus WCN6750: map the
 * MSI target register (first MEM resource) into the device's IOVA
 * space, derive the MSI base data from the DT interrupt property and
 * collect the platform IRQ for every MSI vector.
 * Returns 0 on success or a negative errno.
 */
static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	phys_addr_t msi_addr_pa;
	dma_addr_t msi_addr_iova;
	struct resource *res;
	int int_prop;
	int ret;
	int i;

	ret = ath11k_pcic_init_msi_config(ab);
	if (ret) {
		ath11k_err(ab, "failed to init msi config: %d\n", ret);
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ath11k_err(ab, "failed to fetch msi_addr\n");
		return -ENOENT;
	}

	msi_addr_pa = res->start;
	/* The device writes MSI data to this address; map it for DMA */
	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
					 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(ab->dev, msi_addr_iova))
		return -ENOMEM;

	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);

	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
	if (ret)
		return ret;

	/* +32: presumably converts a GIC SPI number from the DT cell into
	 * the absolute hwirq number — TODO confirm against the binding.
	 */
	ab->pci.msi.ep_base_data = int_prop + 32;

	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			return ret;

		ab->pci.msi.irqs[i] = ret;
	}

	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);

	return 0;
}
862 
/* Acquire the "wlan-smp2p-out" smem state handle used to signal power
 * save enter/exit to the firmware.  Chips without smp2p-based WoW exit
 * (hw_params.smp2p_wow_exit unset) need no handle.
 * Returns 0 on success or the PTR_ERR from qcom_smem_state_get().
 */
static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	if (!ab->hw_params.smp2p_wow_exit)
		return 0;

	ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
							    &ab_ahb->smp2p_info.smem_bit);
	if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
		ath11k_err(ab, "failed to fetch smem state: %ld\n",
			   PTR_ERR(ab_ahb->smp2p_info.smem_state));
		return PTR_ERR(ab_ahb->smp2p_info.smem_state);
	}

	return 0;
}
880 
881 static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
882 {
883 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
884 
885 	if (!ab->hw_params.smp2p_wow_exit)
886 		return;
887 
888 	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
889 }
890 
/* Map the device's register space.  Hybrid-bus devices instead set up
 * their MSI resources (their registers are handled via the PCIc ops).
 * Returns 0 on success or a negative errno.
 */
static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	void __iomem *mem;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_ahb_setup_msi_resources(ab);

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(mem)) {
		dev_err(&pdev->dev, "ioremap error\n");
		return PTR_ERR(mem);
	}

	ab->mem = mem;
	ab->mem_len = resource_size(mem_res);

	return 0;
}
911 
912 static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
913 {
914 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
915 	struct device *dev = ab->dev;
916 	struct device_node *node;
917 	struct resource r;
918 	int ret;
919 
920 	node = of_parse_phandle(dev->of_node, "memory-region", 0);
921 	if (!node)
922 		return -ENOENT;
923 
924 	ret = of_address_to_resource(node, 0, &r);
925 	of_node_put(node);
926 	if (ret) {
927 		dev_err(dev, "failed to resolve msa fixed region\n");
928 		return ret;
929 	}
930 
931 	ab_ahb->fw.msa_paddr = r.start;
932 	ab_ahb->fw.msa_size = resource_size(&r);
933 
934 	node = of_parse_phandle(dev->of_node, "memory-region", 1);
935 	if (!node)
936 		return -ENOENT;
937 
938 	ret = of_address_to_resource(node, 0, &r);
939 	of_node_put(node);
940 	if (ret) {
941 		dev_err(dev, "failed to resolve ce fixed region\n");
942 		return ret;
943 	}
944 
945 	ab_ahb->fw.ce_paddr = r.start;
946 	ab_ahb->fw.ce_size = resource_size(&r);
947 
948 	return 0;
949 }
950 
/* Prepare the firmware memory environment for chips with fixed
 * firmware memory (hw_params.fixed_fw_mem):
 *
 *  - resolve the MSA and CE fixed regions from the device tree;
 *  - if no "wifi-firmware" child node exists, firmware memory is
 *    managed by TrustZone (fw.use_tz = true) and nothing else is
 *    needed;
 *  - otherwise register a child platform device for the firmware,
 *    configure its DMA, allocate an IOMMU domain and identity-map
 *    both regions (IOVA == physical) read/write.
 *
 * Errors unwind in reverse order via the goto chain.  Returns 0 on
 * success or a negative errno.
 */
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *host_dev = ab->dev;
	struct platform_device_info info = {0};
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	/* Chipsets not requiring MSA need not initialize
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	ret = ath11k_ahb_setup_msa_resources(ab);
	if (ret) {
		ath11k_err(ab, "failed to setup msa resources\n");
		return ret;
	}

	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
	if (!node) {
		/* No firmware subnode: TrustZone owns the firmware memory */
		ab_ahb->fw.use_tz = true;
		return 0;
	}

	info.fwnode = &node->fwnode;
	info.parent = host_dev;
	info.name = node->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(node);
		return PTR_ERR(pdev);
	}

	ret = of_dma_configure(&pdev->dev, node, true);
	if (ret) {
		ath11k_err(ab, "dma configure fail: %d\n", ret);
		goto err_unregister;
	}

	ab_ahb->fw.dev = &pdev->dev;

	iommu_dom = iommu_domain_alloc(&platform_bus_type);
	if (!iommu_dom) {
		ath11k_err(ab, "failed to allocate iommu domain\n");
		ret = -ENOMEM;
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
	if (ret) {
		ath11k_err(ab, "could not attach device: %d\n", ret);
		goto err_iommu_free;
	}

	/* Identity-map the MSA region (IOVA == physical address) */
	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
		goto err_iommu_detach;
	}

	/* Identity-map the CE region as well */
	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
		goto err_iommu_unmap;
	}

	ab_ahb->fw.use_tz = false;
	ab_ahb->fw.iommu_domain = iommu_dom;
	of_node_put(node);

	return 0;

err_iommu_unmap:
	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);

err_iommu_detach:
	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);

err_iommu_free:
	iommu_domain_free(iommu_dom);

err_unregister:
	platform_device_unregister(pdev);
	of_node_put(node);

	return ret;
}
1048 
/* Undo ath11k_ahb_fw_resources_init(): unmap the MSA and CE regions,
 * detach and free the IOMMU domain and unregister the firmware child
 * platform device.  No-op for chips without fixed firmware memory or
 * when TrustZone manages the firmware memory.  Always returns 0;
 * partial unmaps are only logged.
 */
static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct iommu_domain *iommu;
	size_t unmapped_size;

	/* Chipsets not requiring MSA would have not initialized
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	if (ab_ahb->fw.use_tz)
		return 0;

	iommu = ab_ahb->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
	if (unmapped_size != ab_ahb->fw.msa_size)
		ath11k_err(ab, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
	if (unmapped_size != ab_ahb->fw.ce_size)
		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ab_ahb->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));

	return 0;
}
1083 
1084 static int ath11k_ahb_probe(struct platform_device *pdev)
1085 {
1086 	struct ath11k_base *ab;
1087 	const struct of_device_id *of_id;
1088 	const struct ath11k_hif_ops *hif_ops;
1089 	const struct ath11k_pci_ops *pci_ops;
1090 	enum ath11k_hw_rev hw_rev;
1091 	int ret;
1092 
1093 	of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
1094 	if (!of_id) {
1095 		dev_err(&pdev->dev, "failed to find matching device tree id\n");
1096 		return -EINVAL;
1097 	}
1098 
1099 	hw_rev = (enum ath11k_hw_rev)of_id->data;
1100 
1101 	switch (hw_rev) {
1102 	case ATH11K_HW_IPQ8074:
1103 	case ATH11K_HW_IPQ6018_HW10:
1104 	case ATH11K_HW_IPQ5018_HW10:
1105 		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
1106 		pci_ops = NULL;
1107 		break;
1108 	case ATH11K_HW_WCN6750_HW10:
1109 		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
1110 		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
1111 		break;
1112 	default:
1113 		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
1114 		return -EOPNOTSUPP;
1115 	}
1116 
1117 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1118 	if (ret) {
1119 		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
1120 		return ret;
1121 	}
1122 
1123 	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
1124 			       ATH11K_BUS_AHB);
1125 	if (!ab) {
1126 		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
1127 		return -ENOMEM;
1128 	}
1129 
1130 	ab->hif.ops = hif_ops;
1131 	ab->pdev = pdev;
1132 	ab->hw_rev = hw_rev;
1133 	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
1134 	platform_set_drvdata(pdev, ab);
1135 
1136 	ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
1137 	if (ret) {
1138 		ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1139 		goto err_core_free;
1140 	}
1141 
1142 	ret = ath11k_core_pre_init(ab);
1143 	if (ret)
1144 		goto err_core_free;
1145 
1146 	ret = ath11k_ahb_setup_resources(ab);
1147 	if (ret)
1148 		goto err_core_free;
1149 
1150 	ab->mem_ce = ab->mem;
1151 
1152 	if (ab->hw_params.ce_remap) {
1153 		const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
1154 		/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
1155 		 * and the space is not contiguous, hence remapping the CE registers
1156 		 * to a new space for accessing them.
1157 		 */
1158 		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
1159 		if (!ab->mem_ce) {
1160 			dev_err(&pdev->dev, "ce ioremap error\n");
1161 			ret = -ENOMEM;
1162 			goto err_core_free;
1163 		}
1164 	}
1165 
1166 	ret = ath11k_ahb_fw_resources_init(ab);
1167 	if (ret)
1168 		goto err_core_free;
1169 
1170 	ret = ath11k_ahb_setup_smp2p_handle(ab);
1171 	if (ret)
1172 		goto err_fw_deinit;
1173 
1174 	ret = ath11k_hal_srng_init(ab);
1175 	if (ret)
1176 		goto err_release_smp2p_handle;
1177 
1178 	ret = ath11k_ce_alloc_pipes(ab);
1179 	if (ret) {
1180 		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1181 		goto err_hal_srng_deinit;
1182 	}
1183 
1184 	ath11k_ahb_init_qmi_ce_config(ab);
1185 
1186 	ret = ath11k_core_get_rproc(ab);
1187 	if (ret) {
1188 		ath11k_err(ab, "failed to get rproc: %d\n", ret);
1189 		goto err_ce_free;
1190 	}
1191 
1192 	ret = ath11k_core_init(ab);
1193 	if (ret) {
1194 		ath11k_err(ab, "failed to init core: %d\n", ret);
1195 		goto err_ce_free;
1196 	}
1197 
1198 	ret = ath11k_ahb_config_irq(ab);
1199 	if (ret) {
1200 		ath11k_err(ab, "failed to configure irq: %d\n", ret);
1201 		goto err_ce_free;
1202 	}
1203 
1204 	ath11k_qmi_fwreset_from_cold_boot(ab);
1205 
1206 	return 0;
1207 
1208 err_ce_free:
1209 	ath11k_ce_free_pipes(ab);
1210 
1211 err_hal_srng_deinit:
1212 	ath11k_hal_srng_deinit(ab);
1213 
1214 err_release_smp2p_handle:
1215 	ath11k_ahb_release_smp2p_handle(ab);
1216 
1217 err_fw_deinit:
1218 	ath11k_ahb_fw_resource_deinit(ab);
1219 
1220 err_core_free:
1221 	ath11k_core_free(ab);
1222 	platform_set_drvdata(pdev, NULL);
1223 
1224 	return ret;
1225 }
1226 
1227 static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
1228 {
1229 	unsigned long left;
1230 
1231 	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1232 		left = wait_for_completion_timeout(&ab->driver_recovery,
1233 						   ATH11K_AHB_RECOVERY_TIMEOUT);
1234 		if (!left)
1235 			ath11k_warn(ab, "failed to receive recovery response completion\n");
1236 	}
1237 
1238 	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1239 	cancel_work_sync(&ab->restart_work);
1240 	cancel_work_sync(&ab->qmi.event_work);
1241 }
1242 
/* Release everything ath11k_ahb_probe() acquired. Shared by the
 * remove and shutdown paths; the teardown order below is deliberate
 * (IRQs first so no handler touches rings being torn down).
 */
static void ath11k_ahb_free_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;

	ath11k_ahb_free_irq(ab);
	ath11k_hal_srng_deinit(ab);
	ath11k_ahb_release_smp2p_handle(ab);
	ath11k_ahb_fw_resource_deinit(ab);
	ath11k_ce_free_pipes(ab);

	/* ab->mem_ce was ioremap'd separately only when CE registers
	 * live outside the wcss space (e.g. IPQ5018).
	 */
	if (ab->hw_params.ce_remap)
		iounmap(ab->mem_ce);

	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}
1259 
1260 static int ath11k_ahb_remove(struct platform_device *pdev)
1261 {
1262 	struct ath11k_base *ab = platform_get_drvdata(pdev);
1263 
1264 	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1265 		ath11k_ahb_power_down(ab);
1266 		ath11k_debugfs_soc_destroy(ab);
1267 		ath11k_qmi_deinit_service(ab);
1268 		goto qmi_fail;
1269 	}
1270 
1271 	ath11k_ahb_remove_prepare(ab);
1272 	ath11k_core_deinit(ab);
1273 
1274 qmi_fail:
1275 	ath11k_ahb_free_resources(ab);
1276 
1277 	return 0;
1278 }
1279 
1280 static void ath11k_ahb_shutdown(struct platform_device *pdev)
1281 {
1282 	struct ath11k_base *ab = platform_get_drvdata(pdev);
1283 
1284 	/* platform shutdown() & remove() are mutually exclusive.
1285 	 * remove() is invoked during rmmod & shutdown() during
1286 	 * system reboot/shutdown.
1287 	 */
1288 	ath11k_ahb_remove_prepare(ab);
1289 
1290 	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
1291 		goto free_resources;
1292 
1293 	ath11k_core_deinit(ab);
1294 
1295 free_resources:
1296 	ath11k_ahb_free_resources(ab);
1297 }
1298 
/* Platform driver glue: binds via the OF match table above. */
static struct platform_driver ath11k_ahb_driver = {
	.driver         = {
		.name   = "ath11k",
		.of_match_table = ath11k_ahb_of_match,
	},
	.probe  = ath11k_ahb_probe,
	.remove = ath11k_ahb_remove,
	.shutdown = ath11k_ahb_shutdown,
};
1308 
/* Module entry point: register the AHB platform driver. */
static int ath11k_ahb_init(void)
{
	return platform_driver_register(&ath11k_ahb_driver);
}
module_init(ath11k_ahb_init);
1314 
/* Module exit point: unregister the AHB platform driver. */
static void ath11k_ahb_exit(void)
{
	platform_driver_unregister(&ath11k_ahb_driver);
}
module_exit(ath11k_ahb_exit);
1320 
1321 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
1322 MODULE_LICENSE("Dual BSD/GPL");
1323