xref: /freebsd/sys/contrib/dev/iwlwifi/pcie/rx.c (revision 61e21613)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5  * Copyright (C) 2016-2017 Intel Deutschland GmbH
6  */
7 #include <linux/sched.h>
8 #include <linux/wait.h>
9 #include <linux/gfp.h>
10 
11 #include "iwl-prph.h"
12 #include "iwl-io.h"
13 #include "internal.h"
14 #include "iwl-op-mode.h"
15 #include "iwl-context-info-gen3.h"
16 
17 /******************************************************************************
18  *
19  * RX path functions
20  *
21  ******************************************************************************/
22 
23 /*
24  * Rx theory of operation
25  *
26  * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
27  * each of which point to Receive Buffers to be filled by the NIC.  These get
28  * used not only for Rx frames, but for any command response or notification
29  * from the NIC.  The driver and NIC manage the Rx buffers by means
30  * of indexes into the circular buffer.
31  *
32  * Rx Queue Indexes
33  * The host/firmware share two index registers for managing the Rx buffers.
34  *
35  * The READ index maps to the first position that the firmware may be writing
36  * to -- the driver can read up to (but not including) this position and get
37  * good data.
38  * The READ index is managed by the firmware once the card is enabled.
39  *
40  * The WRITE index maps to the last position the driver has read from -- the
41  * position preceding WRITE is the last slot the firmware can place a packet.
42  *
43  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
44  * WRITE = READ.
45  *
46  * During initialization, the host sets up the READ queue position to the first
47  * INDEX position, and WRITE to the last (READ - 1 wrapped)
48  *
49  * When the firmware places a packet in a buffer, it will advance the READ index
50  * and fire the RX interrupt.  The driver can then query the READ index and
51  * process as many packets as possible, moving the WRITE index forward as it
52  * resets the Rx queue buffers with new memory.
53  *
54  * The management in the driver is as follows:
55  * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
56  *   When the interrupt handler is called, the request is processed.
57  *   The page is either stolen - transferred to the upper layer
58  *   or reused - added immediately to the iwl->rxq->rx_free list.
59  * + When the page is stolen - the driver updates the matching queue's used
60  *   count, detaches the RBD and transfers it to the queue used list.
61  *   When there are two used RBDs - they are transferred to the allocator empty
62  *   list. Work is then scheduled for the allocator to start allocating
63  *   eight buffers.
64  *   When there are another 6 used RBDs - they are transferred to the allocator
65  *   empty list and the driver tries to claim the pre-allocated buffers and
66  *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
67  *   until ready.
68  *   When there are 8+ buffers in the free list - either from allocation or from
69  *   8 reused unstolen pages - restock is called to update the FW and indexes.
70  * + In order to make sure the allocator always has RBDs to use for allocation
71  *   the allocator has an initial pool of size num_queues*(8-2) - the
72  *   maximum number of missing RBDs per allocation request (a request is
73  *   posted with 2 empty RBDs; there is no guarantee when the other 6 are supplied).
74  *   The queues supply the recycling of the rest of the RBDs.
75  * + A received packet is processed and handed to the kernel network stack,
76  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
77  * + If there are no allocated buffers in iwl->rxq->rx_free,
78  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
79  *   If there were enough free buffers and RX_STALLED is set it is cleared.
80  *
81  *
82  * Driver sequence:
83  *
84  * iwl_rxq_alloc()            Allocates rx_free
85  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
86  *                            iwl_pcie_rxq_restock.
87  *                            Used only during initialization.
88  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
89  *                            queue, updates firmware pointers, and updates
90  *                            the WRITE index.
91  * iwl_pcie_rx_allocator()     Background work for allocating pages.
92  *
93  * -- enable interrupts --
94  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
95  *                            READ INDEX, detaching the SKB from the pool.
96  *                            Moves the packet buffer from queue to rx_used.
97  *                            Posts and claims requests to the allocator.
98  *                            Calls iwl_pcie_rxq_restock to refill any empty
99  *                            slots.
100  *
101  * RBD life-cycle:
102  *
103  * Init:
104  * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
105  *
106  * Regular Receive interrupt:
107  * Page Stolen:
108  * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
109  * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
110  * Page not Stolen:
111  * rxq.queue -> rxq.rx_free -> rxq.queue
112  * ...
113  *
114  */
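/*
 * Worked example of the index rules above (illustrative numbers only):
 * with a 256-entry queue the driver starts with READ = 0 and WRITE = 255
 * (READ - 1, wrapped).  After the firmware fills three buffers it
 * advances READ to 3 and fires the RX interrupt; the driver then
 * processes slots 0..2, restocks them with fresh pages and moves WRITE
 * forward, telling the hardware about it in multiples of 8.
 */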
115 
116 /*
117  * iwl_rxq_space - Return number of free slots available in queue.
118  */
119 static int iwl_rxq_space(const struct iwl_rxq *rxq)
120 {
121 	/* Make sure rx queue size is a power of 2 */
122 	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
123 
124 	/*
125 	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
126 	 * between empty and completely full queues.
127 	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
128 	 * defined for negative dividends.
129 	 */
130 	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
131 }
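/*
 * Worked example (illustrative values): with queue_size = 512 the
 * computation above behaves as follows:
 *
 *	read = 10, write = 10:  (10 - 10 - 1) & 511 == 511 slots free
 *	read = 10, write = 9:   (10 -  9 - 1) & 511 == 0   slots free
 *	read = 3,  write = 500: (3 - 500 - 1) & 511 == 14  slots free
 *
 * The mask yields the value modulo queue_size even when the difference
 * is negative, which is why the power-of-2 size is asserted above.
 */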
132 
133 /*
134  * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
135  */
136 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
137 {
138 	return cpu_to_le32((u32)(dma_addr >> 8));
139 }
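/*
 * Example (illustrative address): a receive buffer mapped at DMA address
 * 0x12345600 is stored in the legacy RBD as 0x00123456; dropping the low
 * 8 bits is only lossless because the page-backed buffers are at least
 * 256-byte aligned.
 */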
140 
141 /*
142  * iwl_pcie_rx_stop - stops the Rx DMA
143  */
144 int iwl_pcie_rx_stop(struct iwl_trans *trans)
145 {
146 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
147 		/* TODO: remove this once fw does it */
148 		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
149 		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
150 					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
151 	} else if (trans->trans_cfg->mq_rx_supported) {
152 		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
153 		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
154 					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
155 	} else {
156 		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
157 		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
158 					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
159 					   1000);
160 	}
161 }
162 
163 /*
164  * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
165  */
166 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
167 				    struct iwl_rxq *rxq)
168 {
169 	u32 reg;
170 
171 	lockdep_assert_held(&rxq->lock);
172 
173 	/*
174 	 * explicitly wake up the NIC if:
175 	 * 1. shadow registers aren't enabled
176 	 * 2. there is a chance that the NIC is asleep
177 	 */
178 	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
179 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
180 		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
181 
182 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
183 			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
184 				       reg);
185 			iwl_set_bit(trans, CSR_GP_CNTRL,
186 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
187 			rxq->need_update = true;
188 			return;
189 		}
190 	}
191 
192 	rxq->write_actual = round_down(rxq->write, 8);
193 	if (!trans->trans_cfg->mq_rx_supported)
194 		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
195 	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
196 		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
197 			    HBUS_TARG_WRPTR_RX_Q(rxq->id));
198 	else
199 		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
200 			    rxq->write_actual);
201 }
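/*
 * Illustration (not part of the driver): the device is only told about
 * write pointer values that are multiples of 8.  With rxq->write == 13,
 * write_actual becomes round_down(13, 8) == 8; the remaining 5 freshly
 * stocked RBDs are announced once the write pointer reaches 16.
 */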
202 
203 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
204 {
205 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
206 	int i;
207 
208 	for (i = 0; i < trans->num_rx_queues; i++) {
209 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
210 
211 		if (!rxq->need_update)
212 			continue;
213 		spin_lock_bh(&rxq->lock);
214 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
215 		rxq->need_update = false;
216 		spin_unlock_bh(&rxq->lock);
217 	}
218 }
219 
220 static void iwl_pcie_restock_bd(struct iwl_trans *trans,
221 				struct iwl_rxq *rxq,
222 				struct iwl_rx_mem_buffer *rxb)
223 {
224 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
225 		struct iwl_rx_transfer_desc *bd = rxq->bd;
226 
227 		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
228 
229 		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
230 		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
231 	} else {
232 		__le64 *bd = rxq->bd;
233 
234 		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
235 	}
236 
237 #if defined(__linux__)
238 	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
239 #elif defined(__FreeBSD__)
240 	IWL_DEBUG_PCI_RW(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
241 #endif
242 		     (u32)rxb->vid, rxq->id, rxq->write);
243 }
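/*
 * Layout sketch for the two descriptor formats written above
 * (illustrative values): on AX210 and later the transfer descriptor
 * carries the DMA address and the virtual buffer ID in separate fields;
 * on earlier multi-queue hardware a single __le64 holds the address with
 * the VID OR-ed into its low bits, e.g. 0x12345000 | 0x2b == 0x1234502b.
 * The caller's WARN_ON against supported_dma_mask guarantees those low
 * bits are otherwise zero.
 */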
244 
245 /*
246  * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
247  */
248 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
249 				  struct iwl_rxq *rxq)
250 {
251 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
252 	struct iwl_rx_mem_buffer *rxb;
253 
254 	/*
255 	 * If the device isn't enabled - no need to try to add buffers...
256 	 * This can happen when we stop the device and still have an interrupt
257 	 * pending. We stop the APM before we sync the interrupts because we
258 	 * have to (see comment there). On the other hand, since the APM is
259 	 * stopped, we cannot access the HW (in particular not prph).
260 	 * So don't try to restock if the APM has been already stopped.
261 	 */
262 	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
263 		return;
264 
265 	spin_lock_bh(&rxq->lock);
266 	while (rxq->free_count) {
267 		/* Get next free Rx buffer, remove from free list */
268 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
269 				       list);
270 		list_del(&rxb->list);
271 		rxb->invalid = false;
272 		/* some low bits are expected to be unset (depending on hw) */
273 		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
274 		/* Point to Rx buffer via next RBD in circular buffer */
275 		iwl_pcie_restock_bd(trans, rxq, rxb);
276 		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
277 		rxq->free_count--;
278 	}
279 	spin_unlock_bh(&rxq->lock);
280 
281 	/*
282 	 * If we've added more space for the firmware to place data, tell it.
283 	 * Increment device's write pointer in multiples of 8.
284 	 */
285 	if (rxq->write_actual != (rxq->write & ~0x7)) {
286 		spin_lock_bh(&rxq->lock);
287 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
288 		spin_unlock_bh(&rxq->lock);
289 	}
290 }
291 
292 /*
293  * iwl_pcie_rxsq_restock - restock implementation for single queue rx
294  */
295 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
296 				  struct iwl_rxq *rxq)
297 {
298 	struct iwl_rx_mem_buffer *rxb;
299 
300 	/*
301 	 * If the device isn't enabled - no need to try to add buffers...
302 	 * This can happen when we stop the device and still have an interrupt
303 	 * pending. We stop the APM before we sync the interrupts because we
304 	 * have to (see comment there). On the other hand, since the APM is
305 	 * stopped, we cannot access the HW (in particular not prph).
306 	 * So don't try to restock if the APM has been already stopped.
307 	 */
308 	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
309 		return;
310 
311 	spin_lock_bh(&rxq->lock);
312 	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
313 		__le32 *bd = (__le32 *)rxq->bd;
314 		/* The overwritten rxb must be a used one */
315 		rxb = rxq->queue[rxq->write];
316 		BUG_ON(rxb && rxb->page);
317 
318 		/* Get next free Rx buffer, remove from free list */
319 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
320 				       list);
321 		list_del(&rxb->list);
322 		rxb->invalid = false;
323 
324 		/* Point to Rx buffer via next RBD in circular buffer */
325 		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
326 		rxq->queue[rxq->write] = rxb;
327 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
328 		rxq->free_count--;
329 	}
330 	spin_unlock_bh(&rxq->lock);
331 
332 	/* If we've added more space for the firmware to place data, tell it.
333 	 * Increment device's write pointer in multiples of 8. */
334 	if (rxq->write_actual != (rxq->write & ~0x7)) {
335 		spin_lock_bh(&rxq->lock);
336 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
337 		spin_unlock_bh(&rxq->lock);
338 	}
339 }
340 
341 /*
342  * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
343  *
344  * If there are slots in the RX queue that need to be restocked,
345  * and we have free pre-allocated buffers, fill the ranks as much
346  * as we can, pulling from rx_free.
347  *
348  * This moves the 'write' index forward to catch up with 'processed', and
349  * also updates the memory address in the firmware to reference the new
350  * target buffer.
351  */
352 static
353 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
354 {
355 	if (trans->trans_cfg->mq_rx_supported)
356 		iwl_pcie_rxmq_restock(trans, rxq);
357 	else
358 		iwl_pcie_rxsq_restock(trans, rxq);
359 }
360 
361 /*
362  * iwl_pcie_rx_alloc_page - allocates and returns a page.
363  *
364  */
365 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
366 					   u32 *offset, gfp_t priority)
367 {
368 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
369 	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
370 	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
371 	struct page *page;
372 	gfp_t gfp_mask = priority;
373 
374 	if (trans_pcie->rx_page_order > 0)
375 		gfp_mask |= __GFP_COMP;
376 
377 	if (trans_pcie->alloc_page) {
378 		spin_lock_bh(&trans_pcie->alloc_page_lock);
379 		/* recheck */
380 		if (trans_pcie->alloc_page) {
381 			*offset = trans_pcie->alloc_page_used;
382 			page = trans_pcie->alloc_page;
383 			trans_pcie->alloc_page_used += rbsize;
384 			if (trans_pcie->alloc_page_used >= allocsize)
385 				trans_pcie->alloc_page = NULL;
386 			else
387 				get_page(page);
388 			spin_unlock_bh(&trans_pcie->alloc_page_lock);
389 			return page;
390 		}
391 		spin_unlock_bh(&trans_pcie->alloc_page_lock);
392 	}
393 
394 	/* Alloc a new receive buffer */
395 	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
396 	if (!page) {
397 		if (net_ratelimit())
398 			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
399 				       trans_pcie->rx_page_order);
400 		/*
401 		 * Issue an error if we don't have enough pre-allocated
402 		 * buffers.
403 		 */
404 		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
405 			IWL_CRIT(trans,
406 				 "Failed to alloc_pages\n");
407 		return NULL;
408 	}
409 
410 	if (2 * rbsize <= allocsize) {
411 		spin_lock_bh(&trans_pcie->alloc_page_lock);
412 		if (!trans_pcie->alloc_page) {
413 			get_page(page);
414 			trans_pcie->alloc_page = page;
415 			trans_pcie->alloc_page_used = rbsize;
416 		}
417 		spin_unlock_bh(&trans_pcie->alloc_page_lock);
418 	}
419 
420 	*offset = 0;
421 	return page;
422 }
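/*
 * Illustration (assumed sizes, not part of the driver): with 2K receive
 * buffers and 4K pages, 2 * rbsize <= allocsize holds, so one
 * alloc_pages() call can back two buffers.  The first caller returns the
 * page at offset 0 and caches it in trans_pcie->alloc_page (taking an
 * extra page reference); the next caller is handed offset 2048 and the
 * cached pointer is dropped once the page is fully used.
 */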
423 
424 /*
425  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
426  *
427  * A used RBD is an Rx buffer that has been given to the stack. To use it again
428  * a page must be allocated and the RBD must point to the page. This function
429  * doesn't change the HW pointer but handles the list of pages that is used by
430  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
431  * allocated buffers.
432  */
433 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
434 			    struct iwl_rxq *rxq)
435 {
436 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
437 	struct iwl_rx_mem_buffer *rxb;
438 	struct page *page;
439 
440 	while (1) {
441 		unsigned int offset;
442 
443 		spin_lock_bh(&rxq->lock);
444 		if (list_empty(&rxq->rx_used)) {
445 			spin_unlock_bh(&rxq->lock);
446 			return;
447 		}
448 		spin_unlock_bh(&rxq->lock);
449 
450 		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
451 		if (!page)
452 			return;
453 
454 		spin_lock_bh(&rxq->lock);
455 
456 		if (list_empty(&rxq->rx_used)) {
457 			spin_unlock_bh(&rxq->lock);
458 			__free_pages(page, trans_pcie->rx_page_order);
459 			return;
460 		}
461 		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
462 				       list);
463 		list_del(&rxb->list);
464 		spin_unlock_bh(&rxq->lock);
465 
466 		BUG_ON(rxb->page);
467 		rxb->page = page;
468 		rxb->offset = offset;
469 		/* Get physical address of the RB */
470 		rxb->page_dma =
471 			dma_map_page(trans->dev, page, rxb->offset,
472 				     trans_pcie->rx_buf_bytes,
473 				     DMA_FROM_DEVICE);
474 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
475 			rxb->page = NULL;
476 			spin_lock_bh(&rxq->lock);
477 			list_add(&rxb->list, &rxq->rx_used);
478 			spin_unlock_bh(&rxq->lock);
479 			__free_pages(page, trans_pcie->rx_page_order);
480 			return;
481 		}
482 
483 		spin_lock_bh(&rxq->lock);
484 
485 		list_add_tail(&rxb->list, &rxq->rx_free);
486 		rxq->free_count++;
487 
488 		spin_unlock_bh(&rxq->lock);
489 	}
490 }
491 
492 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
493 {
494 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
495 	int i;
496 
497 	if (!trans_pcie->rx_pool)
498 		return;
499 
500 	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
501 		if (!trans_pcie->rx_pool[i].page)
502 			continue;
503 		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
504 			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
505 		__free_pages(trans_pcie->rx_pool[i].page,
506 			     trans_pcie->rx_page_order);
507 		trans_pcie->rx_pool[i].page = NULL;
508 	}
509 }
510 
511 /*
512  * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
513  *
514  * Allocates 8 pages for each received request.
515  * Called as a scheduled work item.
516  */
517 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
518 {
519 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
520 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
521 	struct list_head local_empty;
522 	int pending = atomic_read(&rba->req_pending);
523 
524 	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
525 
526 	/* If we were scheduled - there is at least one request */
527 	spin_lock_bh(&rba->lock);
528 	/* swap out the rba->rbd_empty to a local list */
529 	list_replace_init(&rba->rbd_empty, &local_empty);
530 	spin_unlock_bh(&rba->lock);
531 
532 	while (pending) {
533 		int i;
534 		LIST_HEAD(local_allocated);
535 		gfp_t gfp_mask = GFP_KERNEL;
536 
537 		/* Do not post a warning if there are only a few requests */
538 		if (pending < RX_PENDING_WATERMARK)
539 			gfp_mask |= __GFP_NOWARN;
540 
541 		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
542 			struct iwl_rx_mem_buffer *rxb;
543 			struct page *page;
544 
545 			/* List should never be empty - each reused RBD is
546 			 * returned to the list, and initial pool covers any
547 			 * possible gap between the time the page is allocated
548 			 * and the time the RBD is added.
549 			 */
550 			BUG_ON(list_empty(&local_empty));
551 			/* Get the first rxb from the rbd list */
552 			rxb = list_first_entry(&local_empty,
553 					       struct iwl_rx_mem_buffer, list);
554 			BUG_ON(rxb->page);
555 
556 			/* Alloc a new receive buffer */
557 			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
558 						      gfp_mask);
559 			if (!page)
560 				continue;
561 			rxb->page = page;
562 
563 			/* Get physical address of the RB */
564 			rxb->page_dma = dma_map_page(trans->dev, page,
565 						     rxb->offset,
566 						     trans_pcie->rx_buf_bytes,
567 						     DMA_FROM_DEVICE);
568 			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
569 				rxb->page = NULL;
570 				__free_pages(page, trans_pcie->rx_page_order);
571 				continue;
572 			}
573 
574 			/* move the allocated entry to the out list */
575 			list_move(&rxb->list, &local_allocated);
576 			i++;
577 		}
578 
579 		atomic_dec(&rba->req_pending);
580 		pending--;
581 
582 		if (!pending) {
583 			pending = atomic_read(&rba->req_pending);
584 			if (pending)
585 				IWL_DEBUG_TPT(trans,
586 					      "Got more pending allocation requests = %d\n",
587 					      pending);
588 		}
589 
590 		spin_lock_bh(&rba->lock);
591 		/* add the allocated rbds to the allocator allocated list */
592 		list_splice_tail(&local_allocated, &rba->rbd_allocated);
593 		/* get more empty RBDs for current pending requests */
594 		list_splice_tail_init(&rba->rbd_empty, &local_empty);
595 		spin_unlock_bh(&rba->lock);
596 
597 		atomic_inc(&rba->req_ready);
598 
599 	}
600 
601 	spin_lock_bh(&rba->lock);
602 	/* return unused rbds to the allocator empty list */
603 	list_splice_tail(&local_empty, &rba->rbd_empty);
604 	spin_unlock_bh(&rba->lock);
605 
606 	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
607 }
608 
609 /*
610  * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
611  *
612  * Called by the queue when it has posted an allocation request and
613  * has freed 8 RBDs in order to restock itself.
614  * This function directly moves the allocated RBs to the queue's ownership
615  * and updates the relevant counters.
616  */
617 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
618 				      struct iwl_rxq *rxq)
619 {
620 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
621 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
622 	int i;
623 
624 	lockdep_assert_held(&rxq->lock);
625 
626 	/*
627 	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
628 	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
629 	 * function will return early, as there are no ready requests.
630 	 * atomic_dec_if_positive will perform the *actual* decrement only if
631 	 * req_ready > 0, i.e. - there are ready requests and the function
632 	 * hands one request to the caller.
633 	 */
634 	if (atomic_dec_if_positive(&rba->req_ready) < 0)
635 		return;
636 
637 	spin_lock(&rba->lock);
638 	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
639 		/* Get next free Rx buffer, remove it from free list */
640 		struct iwl_rx_mem_buffer *rxb =
641 			list_first_entry(&rba->rbd_allocated,
642 					 struct iwl_rx_mem_buffer, list);
643 
644 		list_move(&rxb->list, &rxq->rx_free);
645 	}
646 	spin_unlock(&rba->lock);
647 
648 	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
649 	rxq->free_count += RX_CLAIM_REQ_ALLOC;
650 }
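/*
 * Worked example (illustrative): a queue hands its first RX_POST_REQ_ALLOC
 * (2) used RBDs to the allocator together with a request; once it has
 * accumulated RX_CLAIM_REQ_ALLOC (8) used RBDs it calls this function to
 * claim a completed batch, so used_count drops by 8 and free_count grows
 * by 8 in a single step.
 */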
651 
652 void iwl_pcie_rx_allocator_work(struct work_struct *data)
653 {
654 	struct iwl_rb_allocator *rba_p =
655 		container_of(data, struct iwl_rb_allocator, rx_alloc);
656 	struct iwl_trans_pcie *trans_pcie =
657 		container_of(rba_p, struct iwl_trans_pcie, rba);
658 
659 	iwl_pcie_rx_allocator(trans_pcie->trans);
660 }
661 
662 static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
663 {
664 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
665 		return sizeof(struct iwl_rx_transfer_desc);
666 
667 	return trans->trans_cfg->mq_rx_supported ?
668 			sizeof(__le64) : sizeof(__le32);
669 }
670 
671 static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
672 {
673 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
674 		return sizeof(struct iwl_rx_completion_desc_bz);
675 
676 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
677 		return sizeof(struct iwl_rx_completion_desc);
678 
679 	return sizeof(__le32);
680 }
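/*
 * Size summary derived from the two helpers above and the BUILD_BUG_ONs
 * elsewhere in this file (for orientation only):
 *
 *	hardware		free BD			used BD
 *	single-queue		__le32 (4 bytes)	__le32 (4 bytes)
 *	multi-queue, pre-AX210	__le64 (8 bytes)	__le32 (4 bytes)
 *	AX210			transfer_desc (16)	completion_desc (32)
 *	BZ and later		transfer_desc (16)	completion_desc_bz (4)
 */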
681 
682 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
683 				  struct iwl_rxq *rxq)
684 {
685 	int free_size = iwl_pcie_free_bd_size(trans);
686 
687 	if (rxq->bd)
688 		dma_free_coherent(trans->dev,
689 				  free_size * rxq->queue_size,
690 				  rxq->bd, rxq->bd_dma);
691 	rxq->bd_dma = 0;
692 	rxq->bd = NULL;
693 
694 	rxq->rb_stts_dma = 0;
695 	rxq->rb_stts = NULL;
696 
697 	if (rxq->used_bd)
698 		dma_free_coherent(trans->dev,
699 				  iwl_pcie_used_bd_size(trans) *
700 					rxq->queue_size,
701 				  rxq->used_bd, rxq->used_bd_dma);
702 	rxq->used_bd_dma = 0;
703 	rxq->used_bd = NULL;
704 }
705 
706 static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
707 {
708 	bool use_rx_td = (trans->trans_cfg->device_family >=
709 			  IWL_DEVICE_FAMILY_AX210);
710 
711 	if (use_rx_td)
712 		return sizeof(__le16);
713 
714 	return sizeof(struct iwl_rb_status);
715 }
716 
717 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
718 				  struct iwl_rxq *rxq)
719 {
720 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
721 	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
722 	struct device *dev = trans->dev;
723 	int i;
724 	int free_size;
725 
726 	spin_lock_init(&rxq->lock);
727 	if (trans->trans_cfg->mq_rx_supported)
728 		rxq->queue_size = trans->cfg->num_rbds;
729 	else
730 		rxq->queue_size = RX_QUEUE_SIZE;
731 
732 	free_size = iwl_pcie_free_bd_size(trans);
733 
734 	/*
735 	 * Allocate the circular buffer of Read Buffer Descriptors
736 	 * (RBDs)
737 	 */
738 	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
739 				     &rxq->bd_dma, GFP_KERNEL);
740 	if (!rxq->bd)
741 		goto err;
742 
743 	if (trans->trans_cfg->mq_rx_supported) {
744 		rxq->used_bd = dma_alloc_coherent(dev,
745 						  iwl_pcie_used_bd_size(trans) *
746 							rxq->queue_size,
747 						  &rxq->used_bd_dma,
748 						  GFP_KERNEL);
749 		if (!rxq->used_bd)
750 			goto err;
751 	}
752 
753 	rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
754 	rxq->rb_stts_dma =
755 		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
756 
757 	return 0;
758 
759 err:
760 	for (i = 0; i < trans->num_rx_queues; i++) {
761 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
762 
763 		iwl_pcie_free_rxq_dma(trans, rxq);
764 	}
765 
766 	return -ENOMEM;
767 }
768 
769 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
770 {
771 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
772 	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
773 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
774 	int i, ret;
775 
776 	if (WARN_ON(trans_pcie->rxq))
777 		return -EINVAL;
778 
779 	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
780 				  GFP_KERNEL);
781 	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
782 				      sizeof(trans_pcie->rx_pool[0]),
783 				      GFP_KERNEL);
784 	trans_pcie->global_table =
785 		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
786 			sizeof(trans_pcie->global_table[0]),
787 			GFP_KERNEL);
788 	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
789 	    !trans_pcie->global_table) {
790 		ret = -ENOMEM;
791 		goto err;
792 	}
793 
794 	spin_lock_init(&rba->lock);
795 
796 	/*
797 	 * Allocate the driver's pointer to receive buffer status.
798 	 * Allocate for all queues continuously (HW requirement).
799 	 */
800 	trans_pcie->base_rb_stts =
801 			dma_alloc_coherent(trans->dev,
802 					   rb_stts_size * trans->num_rx_queues,
803 					   &trans_pcie->base_rb_stts_dma,
804 					   GFP_KERNEL);
805 	if (!trans_pcie->base_rb_stts) {
806 		ret = -ENOMEM;
807 		goto err;
808 	}
809 
810 	for (i = 0; i < trans->num_rx_queues; i++) {
811 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
812 
813 		rxq->id = i;
814 		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
815 		if (ret)
816 			goto err;
817 	}
818 	return 0;
819 
820 err:
821 	if (trans_pcie->base_rb_stts) {
822 		dma_free_coherent(trans->dev,
823 				  rb_stts_size * trans->num_rx_queues,
824 				  trans_pcie->base_rb_stts,
825 				  trans_pcie->base_rb_stts_dma);
826 		trans_pcie->base_rb_stts = NULL;
827 		trans_pcie->base_rb_stts_dma = 0;
828 	}
829 	kfree(trans_pcie->rx_pool);
830 	trans_pcie->rx_pool = NULL;
831 	kfree(trans_pcie->global_table);
832 	trans_pcie->global_table = NULL;
833 	kfree(trans_pcie->rxq);
834 	trans_pcie->rxq = NULL;
835 
836 	return ret;
837 }
838 
839 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
840 {
841 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
842 	u32 rb_size;
843 	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
844 
845 	switch (trans_pcie->rx_buf_size) {
846 	case IWL_AMSDU_4K:
847 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
848 		break;
849 	case IWL_AMSDU_8K:
850 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
851 		break;
852 	case IWL_AMSDU_12K:
853 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
854 		break;
855 	default:
856 		WARN_ON(1);
857 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
858 	}
859 
860 	if (!iwl_trans_grab_nic_access(trans))
861 		return;
862 
863 	/* Stop Rx DMA */
864 	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
865 	/* reset and flush pointers */
866 	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
867 	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
868 	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
869 
870 	/* Reset driver's Rx queue write index */
871 	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
872 
873 	/* Tell device where to find RBD circular buffer in DRAM */
874 	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
875 		    (u32)(rxq->bd_dma >> 8));
876 
877 	/* Tell device where in DRAM to update its Rx status */
878 	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
879 		    rxq->rb_stts_dma >> 4);
880 
881 	/* Enable Rx DMA
882 	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
883 	 *      the credit mechanism in 5000 HW RX FIFO
884 	 * Direct rx interrupts to hosts
885 	 * Rx buffer size 4 or 8k or 12k
886 	 * RB timeout 0x10
887 	 * 256 RBDs
888 	 */
889 	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
890 		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
891 		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
892 		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
893 		    rb_size |
894 		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
895 		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
896 
897 	iwl_trans_release_nic_access(trans);
898 
899 	/* Set interrupt coalescing timer to default (2048 usecs) */
900 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
901 
902 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
903 	if (trans->cfg->host_interrupt_operation_mode)
904 		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
905 }
906 
907 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
908 {
909 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
910 	u32 rb_size, enabled = 0;
911 	int i;
912 
913 	switch (trans_pcie->rx_buf_size) {
914 	case IWL_AMSDU_2K:
915 		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
916 		break;
917 	case IWL_AMSDU_4K:
918 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
919 		break;
920 	case IWL_AMSDU_8K:
921 		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
922 		break;
923 	case IWL_AMSDU_12K:
924 		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
925 		break;
926 	default:
927 		WARN_ON(1);
928 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
929 	}
930 
931 	if (!iwl_trans_grab_nic_access(trans))
932 		return;
933 
934 	/* Stop Rx DMA */
935 	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
936 	/* disable free and used rx queue operation */
937 	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
938 
939 	for (i = 0; i < trans->num_rx_queues; i++) {
940 		/* Tell device where to find RBD free table in DRAM */
941 		iwl_write_prph64_no_grab(trans,
942 					 RFH_Q_FRBDCB_BA_LSB(i),
943 					 trans_pcie->rxq[i].bd_dma);
944 		/* Tell device where to find RBD used table in DRAM */
945 		iwl_write_prph64_no_grab(trans,
946 					 RFH_Q_URBDCB_BA_LSB(i),
947 					 trans_pcie->rxq[i].used_bd_dma);
948 		/* Tell device where in DRAM to update its Rx status */
949 		iwl_write_prph64_no_grab(trans,
950 					 RFH_Q_URBD_STTS_WPTR_LSB(i),
951 					 trans_pcie->rxq[i].rb_stts_dma);
952 		/* Reset device index tables */
953 		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
954 		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
955 		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
956 
957 		enabled |= BIT(i) | BIT(i + 16);
958 	}
959 
960 	/*
961 	 * Enable Rx DMA
962 	 * Rx buffer size 4 or 8k or 12k
963 	 * Min RB size 4 or 8
964 	 * Drop frames that exceed RB size
965 	 * 512 RBDs
966 	 */
967 	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
968 			       RFH_DMA_EN_ENABLE_VAL | rb_size |
969 			       RFH_RXF_DMA_MIN_RB_4_8 |
970 			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
971 			       RFH_RXF_DMA_RBDCB_SIZE_512);
972 
973 	/*
974 	 * Activate DMA snooping.
975 	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
976 	 * Default queue is 0
977 	 */
978 	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
979 			       RFH_GEN_CFG_RFH_DMA_SNOOP |
980 			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
981 			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
982 			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
983 					       trans->trans_cfg->integrated ?
984 					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
985 					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
986 	/* Enable the relevant rx queues */
987 	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
988 
989 	iwl_trans_release_nic_access(trans);
990 
991 	/* Set interrupt coalescing timer to default (2048 usecs) */
992 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
993 }
994 
995 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
996 {
997 	lockdep_assert_held(&rxq->lock);
998 
999 	INIT_LIST_HEAD(&rxq->rx_free);
1000 	INIT_LIST_HEAD(&rxq->rx_used);
1001 	rxq->free_count = 0;
1002 	rxq->used_count = 0;
1003 }
1004 
1005 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
1006 
1007 static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
1008 {
1009 	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1010 	struct iwl_trans_pcie *trans_pcie;
1011 	struct iwl_trans *trans;
1012 	int ret;
1013 
1014 	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
1015 	trans = trans_pcie->trans;
1016 
1017 	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1018 
1019 	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
1020 		      rxq->id, ret, budget);
1021 
1022 	if (ret < budget) {
1023 		spin_lock(&trans_pcie->irq_lock);
1024 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1025 			_iwl_enable_interrupts(trans);
1026 		spin_unlock(&trans_pcie->irq_lock);
1027 
1028 		napi_complete_done(&rxq->napi, ret);
1029 	}
1030 
1031 	return ret;
1032 }
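/*
 * Example flow (illustrative numbers): with a NAPI budget of 64, if
 * iwl_pcie_rx_handle() drains the queue after 10 packets it returns 10;
 * since 10 < 64, interrupts are re-enabled and napi_complete_done() is
 * called.  If the full budget is consumed, NAPI keeps polling and the
 * interrupt stays masked until a later poll finishes below budget.
 */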
1033 
1034 static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
1035 {
1036 	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1037 	struct iwl_trans_pcie *trans_pcie;
1038 	struct iwl_trans *trans;
1039 	int ret;
1040 
1041 	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
1042 	trans = trans_pcie->trans;
1043 
1044 	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1045 	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
1046 		      budget);
1047 
1048 	if (ret < budget) {
1049 		int irq_line = rxq->id;
1050 
1051 		/* FIRST_RSS is shared with line 0 */
1052 		if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
1053 		    rxq->id == 1)
1054 			irq_line = 0;
1055 
1056 		spin_lock(&trans_pcie->irq_lock);
1057 		iwl_pcie_clear_irq(trans, irq_line);
1058 		spin_unlock(&trans_pcie->irq_lock);
1059 
1060 		napi_complete_done(&rxq->napi, ret);
1061 	}
1062 
1063 	return ret;
1064 }
1065 
1066 void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
1067 {
1068 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1069 	int i;
1070 
1071 	if (unlikely(!trans_pcie->rxq))
1072 		return;
1073 
1074 	for (i = 0; i < trans->num_rx_queues; i++) {
1075 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1076 
1077 		if (rxq && rxq->napi.poll)
1078 			napi_synchronize(&rxq->napi);
1079 	}
1080 }
1081 
1082 static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1083 {
1084 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1085 	struct iwl_rxq *def_rxq;
1086 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1087 	int i, err, queue_size, allocator_pool_size, num_alloc;
1088 
1089 	if (!trans_pcie->rxq) {
1090 		err = iwl_pcie_rx_alloc(trans);
1091 		if (err)
1092 			return err;
1093 	}
1094 	def_rxq = trans_pcie->rxq;
1095 
1096 	cancel_work_sync(&rba->rx_alloc);
1097 
1098 	spin_lock_bh(&rba->lock);
1099 	atomic_set(&rba->req_pending, 0);
1100 	atomic_set(&rba->req_ready, 0);
1101 	INIT_LIST_HEAD(&rba->rbd_allocated);
1102 	INIT_LIST_HEAD(&rba->rbd_empty);
1103 	spin_unlock_bh(&rba->lock);
1104 
1105 	/* free all first - we overwrite everything here */
1106 	iwl_pcie_free_rbs_pool(trans);
1107 
1108 	for (i = 0; i < RX_QUEUE_SIZE; i++)
1109 		def_rxq->queue[i] = NULL;
1110 
1111 	for (i = 0; i < trans->num_rx_queues; i++) {
1112 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1113 
1114 		spin_lock_bh(&rxq->lock);
1115 		/*
1116 		 * Set read write pointer to reflect that we have processed
1117 		 * and used all buffers, but have not restocked the Rx queue
1118 		 * with fresh buffers
1119 		 */
1120 		rxq->read = 0;
1121 		rxq->write = 0;
1122 		rxq->write_actual = 0;
1123 		memset(rxq->rb_stts, 0,
1124 		       (trans->trans_cfg->device_family >=
1125 			IWL_DEVICE_FAMILY_AX210) ?
1126 		       sizeof(__le16) : sizeof(struct iwl_rb_status));
1127 
1128 		iwl_pcie_rx_init_rxb_lists(rxq);
1129 
1130 		spin_unlock_bh(&rxq->lock);
1131 
1132 		if (!rxq->napi.poll) {
1133 			int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
1134 
1135 			if (trans_pcie->msix_enabled)
1136 				poll = iwl_pcie_napi_poll_msix;
1137 
1138 			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1139 				       poll);
1140 			napi_enable(&rxq->napi);
1141 		}
1142 
1143 	}
1144 
1145 	/* move the pool to the default queue and allocator ownerships */
1146 	queue_size = trans->trans_cfg->mq_rx_supported ?
1147 			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1148 	allocator_pool_size = trans->num_rx_queues *
1149 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1150 	num_alloc = queue_size + allocator_pool_size;
1151 
1152 	for (i = 0; i < num_alloc; i++) {
1153 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1154 
1155 		if (i < allocator_pool_size)
1156 			list_add(&rxb->list, &rba->rbd_empty);
1157 		else
1158 			list_add(&rxb->list, &def_rxq->rx_used);
1159 		trans_pcie->global_table[i] = rxb;
1160 		rxb->vid = (u16)(i + 1);
1161 		rxb->invalid = true;
1162 	}
1163 
1164 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1165 
1166 	return 0;
1167 }
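/*
 * Sizing sketch (assumed example values, not taken from any specific
 * device): with 2 RX queues and num_rx_bufs = 512, queue_size is 511,
 * allocator_pool_size = 2 * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) =
 * 2 * 6 = 12 and num_alloc = 523.  The first 12 pool entries seed the
 * allocator's empty list; the remaining 511 start on the default queue's
 * rx_used list and receive pages from iwl_pcie_rxq_alloc_rbs() above.
 */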
1168 
1169 int iwl_pcie_rx_init(struct iwl_trans *trans)
1170 {
1171 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1172 	int ret = _iwl_pcie_rx_init(trans);
1173 
1174 	if (ret)
1175 		return ret;
1176 
1177 	if (trans->trans_cfg->mq_rx_supported)
1178 		iwl_pcie_rx_mq_hw_init(trans);
1179 	else
1180 		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1181 
1182 	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1183 
1184 	spin_lock_bh(&trans_pcie->rxq->lock);
1185 	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1186 	spin_unlock_bh(&trans_pcie->rxq->lock);
1187 
1188 	return 0;
1189 }
1190 
1191 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1192 {
1193 	/* Set interrupt coalescing timer to default (2048 usecs) */
1194 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1195 
1196 	/*
1197 	 * We don't configure the RFH.
1198 	 * Restock will be done at alive, after firmware configured the RFH.
1199 	 */
1200 	return _iwl_pcie_rx_init(trans);
1201 }
1202 
1203 void iwl_pcie_rx_free(struct iwl_trans *trans)
1204 {
1205 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1206 	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
1207 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1208 	int i;
1209 
1210 	/*
1211 	 * if rxq is NULL, it means that nothing has been allocated,
1212 	 * exit now
1213 	 */
1214 	if (!trans_pcie->rxq) {
1215 		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1216 		return;
1217 	}
1218 
1219 	cancel_work_sync(&rba->rx_alloc);
1220 
1221 	iwl_pcie_free_rbs_pool(trans);
1222 
1223 	if (trans_pcie->base_rb_stts) {
1224 		dma_free_coherent(trans->dev,
1225 				  rb_stts_size * trans->num_rx_queues,
1226 				  trans_pcie->base_rb_stts,
1227 				  trans_pcie->base_rb_stts_dma);
1228 		trans_pcie->base_rb_stts = NULL;
1229 		trans_pcie->base_rb_stts_dma = 0;
1230 	}
1231 
1232 	for (i = 0; i < trans->num_rx_queues; i++) {
1233 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1234 
1235 		iwl_pcie_free_rxq_dma(trans, rxq);
1236 
1237 		if (rxq->napi.poll) {
1238 			napi_disable(&rxq->napi);
1239 			netif_napi_del(&rxq->napi);
1240 		}
1241 	}
1242 	kfree(trans_pcie->rx_pool);
1243 	kfree(trans_pcie->global_table);
1244 	kfree(trans_pcie->rxq);
1245 
1246 	if (trans_pcie->alloc_page)
1247 		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1248 }
1249 
1250 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1251 					  struct iwl_rb_allocator *rba)
1252 {
1253 	spin_lock(&rba->lock);
1254 	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1255 	spin_unlock(&rba->lock);
1256 }
1257 
1258 /*
1259  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1260  *
1261  * Called when a RBD can be reused. The RBD is transferred to the allocator.
1262  * When there are 2 empty RBDs - a request for allocation is posted
1263  */
1264 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1265 				  struct iwl_rx_mem_buffer *rxb,
1266 				  struct iwl_rxq *rxq, bool emergency)
1267 {
1268 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1269 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1270 
1271 	/* Move the RBD to the used list, will be moved to allocator in batches
1272 	 * before claiming or posting a request */
1273 	list_add_tail(&rxb->list, &rxq->rx_used);
1274 
1275 	if (unlikely(emergency))
1276 		return;
1277 
1278 	/* Count the allocator owned RBDs */
1279 	rxq->used_count++;
1280 
1281 	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
1282 	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
1283 	 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
1284 	 * buffers but still need to post another request afterwards.
1285 	 */
1286 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1287 		/* Move the 2 RBDs to the allocator ownership.
1288 		 * Allocator has another 6 from the pool for the request completion */
1289 		iwl_pcie_rx_move_to_allocator(rxq, rba);
1290 
1291 		atomic_inc(&rba->req_pending);
1292 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1293 	}
1294 }
1295 
1296 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1297 				struct iwl_rxq *rxq,
1298 				struct iwl_rx_mem_buffer *rxb,
1299 				bool emergency,
1300 				int i)
1301 {
1302 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1303 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1304 	bool page_stolen = false;
1305 	int max_len = trans_pcie->rx_buf_bytes;
1306 	u32 offset = 0;
1307 
1308 	if (WARN_ON(!rxb))
1309 		return;
1310 
1311 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1312 
1313 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1314 		struct iwl_rx_packet *pkt;
1315 		bool reclaim;
1316 		int len;
1317 		struct iwl_rx_cmd_buffer rxcb = {
1318 			._offset = rxb->offset + offset,
1319 			._rx_page_order = trans_pcie->rx_page_order,
1320 			._page = rxb->page,
1321 			._page_stolen = false,
1322 			.truesize = max_len,
1323 		};
1324 
1325 		pkt = rxb_addr(&rxcb);
1326 
1327 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1328 			IWL_DEBUG_RX(trans,
1329 				     "Q %d: RB end marker at offset %d\n",
1330 				     rxq->id, offset);
1331 			break;
1332 		}
1333 
1334 		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1335 			FH_RSCSR_RXQ_POS != rxq->id,
1336 		     "frame on invalid queue - is on %d and indicates %d\n",
1337 		     rxq->id,
1338 		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1339 			FH_RSCSR_RXQ_POS);
1340 
1341 		IWL_DEBUG_RX(trans,
1342 			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1343 			     rxq->id, offset,
1344 			     iwl_get_cmd_string(trans,
1345 						WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
1346 			     pkt->hdr.group_id, pkt->hdr.cmd,
1347 			     le16_to_cpu(pkt->hdr.sequence));
1348 
1349 		len = iwl_rx_packet_len(pkt);
1350 		len += sizeof(u32); /* account for status word */
1351 
1352 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1353 
1354 		/* check that what the device tells us made sense */
1355 		if (len < sizeof(*pkt) || offset > max_len)
1356 			break;
1357 
1358 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1359 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1360 
1361 		/* Reclaim a command buffer only if this packet is a response
1362 		 *   to a (driver-originated) command.
1363 		 * If the packet (e.g. Rx frame) originated from uCode,
1364 		 *   there is no command buffer to reclaim.
1365 		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1366 		 *   but apparently a few don't get set; catch them here. */
1367 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1368 		if (reclaim && !pkt->hdr.group_id) {
1369 			int i;
1370 
1371 			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1372 				if (trans_pcie->no_reclaim_cmds[i] ==
1373 							pkt->hdr.cmd) {
1374 					reclaim = false;
1375 					break;
1376 				}
1377 			}
1378 		}
1379 
1380 		if (rxq->id == trans_pcie->def_rx_queue)
1381 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1382 				       &rxcb);
1383 		else
1384 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1385 					   &rxcb, rxq->id);
1386 
1387 		/*
1388 		 * After here, we should always check rxcb._page_stolen,
1389 		 * if it is true then one of the handlers took the page.
1390 		 */
1391 
1392 		if (reclaim) {
1393 			u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1394 			int index = SEQ_TO_INDEX(sequence);
1395 			int cmd_index = iwl_txq_get_cmd_index(txq, index);
1396 
1397 			kfree_sensitive(txq->entries[cmd_index].free_buf);
1398 			txq->entries[cmd_index].free_buf = NULL;
1399 
1400 			/* Invoke any callbacks, transfer the buffer to caller,
1401 			 * and fire off the (possibly) blocking
1402 			 * iwl_trans_send_cmd()
1403 			 * as we reclaim the driver command queue */
1404 			if (!rxcb._page_stolen)
1405 				iwl_pcie_hcmd_complete(trans, &rxcb);
1406 			else
1407 				IWL_WARN(trans, "Claim null rxb?\n");
1408 		}
1409 
1410 		page_stolen |= rxcb._page_stolen;
1411 		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1412 			break;
1413 	}
1414 
1415 	/* page was stolen from us -- free our reference */
1416 	if (page_stolen) {
1417 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1418 		rxb->page = NULL;
1419 	}
1420 
1421 	/* Reuse the page if possible. For notification packets and
1422 	 * SKBs that fail to Rx correctly, add them back into the
1423 	 * rx_free list for reuse later. */
1424 	if (rxb->page != NULL) {
1425 		rxb->page_dma =
1426 			dma_map_page(trans->dev, rxb->page, rxb->offset,
1427 				     trans_pcie->rx_buf_bytes,
1428 				     DMA_FROM_DEVICE);
1429 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1430 			/*
1431 			 * free the page(s) as well to not break
1432 			 * the invariant that the items on the used
1433 			 * list have no page(s)
1434 			 */
1435 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1436 			rxb->page = NULL;
1437 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1438 		} else {
1439 			list_add_tail(&rxb->list, &rxq->rx_free);
1440 			rxq->free_count++;
1441 		}
1442 	} else
1443 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1444 }
1445 
1446 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1447 						  struct iwl_rxq *rxq, int i,
1448 						  bool *join)
1449 {
1450 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1451 	struct iwl_rx_mem_buffer *rxb;
1452 	u16 vid;
1453 
1454 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1455 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
1456 
1457 	if (!trans->trans_cfg->mq_rx_supported) {
1458 		rxb = rxq->queue[i];
1459 		rxq->queue[i] = NULL;
1460 		return rxb;
1461 	}
1462 
1463 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1464 		struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
1465 
1466 		vid = le16_to_cpu(cd[i].rbid);
1467 		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1468 	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1469 		struct iwl_rx_completion_desc *cd = rxq->used_bd;
1470 
1471 		vid = le16_to_cpu(cd[i].rbid);
1472 		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1473 	} else {
1474 		__le32 *cd = rxq->used_bd;
1475 
1476 		vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
1477 	}
1478 
1479 	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1480 		goto out_err;
1481 
1482 	rxb = trans_pcie->global_table[vid - 1];
1483 	if (rxb->invalid)
1484 		goto out_err;
1485 
1486 	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1487 
1488 	rxb->invalid = true;
1489 
1490 	return rxb;
1491 
1492 out_err:
1493 	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1494 	iwl_force_nmi(trans);
1495 	return NULL;
1496 }
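/*
 * Illustration (not part of the driver): on multi-queue hardware the
 * completion descriptor carries only the virtual buffer ID assigned at
 * init time, so e.g. vid 43 resolves to trans_pcie->global_table[42].
 * A vid of 0, a vid beyond the pool size, or a buffer still marked
 * invalid indicates a corrupted descriptor and triggers the NMI above.
 */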
1497 
1498 /*
1499  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1500  */
1501 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
1502 {
1503 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1504 	struct iwl_rxq *rxq;
1505 	u32 r, i, count = 0, handled = 0;
1506 	bool emergency = false;
1507 
1508 	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1509 		return budget;
1510 
1511 	rxq = &trans_pcie->rxq[queue];
1512 
1513 restart:
1514 	spin_lock(&rxq->lock);
1515 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1516 	 * buffer that the driver may process (last buffer filled by ucode). */
1517 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1518 	i = rxq->read;
1519 
1520 	/* W/A 9000 device step A0 wrap-around bug */
1521 	r &= (rxq->queue_size - 1);
1522 
1523 	/* Rx interrupt, but nothing sent from uCode */
1524 	if (i == r)
1525 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1526 
1527 	while (i != r && ++handled < budget) {
1528 		struct iwl_rb_allocator *rba = &trans_pcie->rba;
1529 		struct iwl_rx_mem_buffer *rxb;
1530 		/* number of RBDs still waiting for page allocation */
1531 		u32 rb_pending_alloc =
1532 			atomic_read(&trans_pcie->rba.req_pending) *
1533 			RX_CLAIM_REQ_ALLOC;
1534 		bool join = false;
1535 
1536 		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1537 			     !emergency)) {
1538 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1539 			emergency = true;
1540 			IWL_DEBUG_TPT(trans,
1541 				      "RX path is in emergency. Pending allocations %d\n",
1542 				      rb_pending_alloc);
1543 		}
1544 
1545 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1546 
1547 		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1548 		if (!rxb)
1549 			goto out;
1550 
1551 		if (unlikely(join || rxq->next_rb_is_fragment)) {
1552 			rxq->next_rb_is_fragment = join;
1553 			/*
1554 			 * We can only get a multi-RB in the following cases:
1555 			 *  - firmware issue, sending a too big notification
1556 			 *  - sniffer mode with a large A-MSDU
1557 			 *  - large MTU frames (>2k)
1558 			 * since the multi-RB functionality is limited to newer
1559 			 * hardware that cannot put multiple entries into a
1560 			 * single RB.
1561 			 *
1562 			 * Right now, the higher layers aren't set up to deal
1563 			 * with that, so discard all of these.
1564 			 */
1565 			list_add_tail(&rxb->list, &rxq->rx_free);
1566 			rxq->free_count++;
1567 		} else {
1568 			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1569 		}
1570 
1571 		i = (i + 1) & (rxq->queue_size - 1);
1572 
1573 		/*
1574 		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1575 		 * try to claim the pre-allocated buffers from the allocator.
1576 		 * If not ready - will try to reclaim next time.
1577 		 * There is no need to reschedule work - allocator exits only
1578 		 * on success
1579 		 */
1580 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1581 			iwl_pcie_rx_allocator_get(trans, rxq);
1582 
1583 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1584 			/* Add the remaining empty RBDs for allocator use */
1585 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1586 		} else if (emergency) {
1587 			count++;
1588 			if (count == 8) {
1589 				count = 0;
1590 				if (rb_pending_alloc < rxq->queue_size / 3) {
1591 					IWL_DEBUG_TPT(trans,
1592 						      "RX path exited emergency. Pending allocations %d\n",
1593 						      rb_pending_alloc);
1594 					emergency = false;
1595 				}
1596 
1597 				rxq->read = i;
1598 				spin_unlock(&rxq->lock);
1599 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1600 				iwl_pcie_rxq_restock(trans, rxq);
1601 				goto restart;
1602 			}
1603 		}
1604 	}
1605 out:
1606 	/* Backtrack one entry */
1607 	rxq->read = i;
1608 	spin_unlock(&rxq->lock);
1609 
1610 	/*
1611 	 * Handle a case where in emergency there are some unallocated RBDs.
1612 	 * Those RBDs are in the used list, but are not tracked by the queue's
1613 	 * used_count, which counts allocator-owned RBDs.
1614 	 * Unallocated emergency RBDs must be allocated on exit, otherwise
1615 	 * when called again the function may not be in emergency mode and
1616 	 * they will be handed to the allocator with no tracking in the RBD
1617 	 * allocator counters, which will lead to them never being claimed back
1618 	 * by the queue.
1619 	 * By allocating them here, they are now in the queue free list, and
1620 	 * will be restocked by the next call of iwl_pcie_rxq_restock.
1621 	 */
1622 	if (unlikely(emergency && count))
1623 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1624 
1625 	iwl_pcie_rxq_restock(trans, rxq);
1626 
1627 	return handled;
1628 }
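/*
 * Emergency-path example (illustrative numbers): with a 512-entry queue,
 * once req_pending * RX_CLAIM_REQ_ALLOC reaches 256 (half the queue) the
 * loop above enters emergency mode, allocating pages inline with
 * GFP_ATOMIC and restocking after every 8 handled RBDs; it leaves
 * emergency mode once fewer than 512 / 3 RBDs are pending allocation.
 */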
1629 
1630 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1631 {
1632 	u8 queue = entry->entry;
1633 	struct msix_entry *entries = entry - queue;
1634 
1635 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1636 }
1637 
1638 /*
1639  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1640  * This interrupt handler should be used with RSS queue only.
1641  */
1642 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1643 {
1644 	struct msix_entry *entry = dev_id;
1645 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1646 	struct iwl_trans *trans = trans_pcie->trans;
1647 	struct iwl_rxq *rxq;
1648 
1649 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1650 
1651 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
1652 		return IRQ_NONE;
1653 
1654 	if (!trans_pcie->rxq) {
1655 		if (net_ratelimit())
1656 			IWL_ERR(trans,
1657 				"[%d] Got MSI-X interrupt before we have Rx queues\n",
1658 				entry->entry);
1659 		return IRQ_NONE;
1660 	}
1661 
1662 	rxq = &trans_pcie->rxq[entry->entry];
1663 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1664 	IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
1665 
1666 	local_bh_disable();
1667 	if (napi_schedule_prep(&rxq->napi))
1668 		__napi_schedule(&rxq->napi);
1669 	else
1670 		iwl_pcie_clear_irq(trans, entry->entry);
1671 	local_bh_enable();
1672 
1673 	lock_map_release(&trans->sync_cmd_lockdep_map);
1674 
1675 	return IRQ_HANDLED;
1676 }
1677 
1678 /*
1679  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1680  */
1681 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1682 {
1683 	int i;
1684 
1685 	/* W/A for WiFi/WiMAX coex, where WiMAX owns the RF */
1686 	if (trans->cfg->internal_wimax_coex &&
1687 	    !trans->cfg->apmg_not_supported &&
1688 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1689 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1690 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1691 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1692 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1693 		iwl_op_mode_wimax_active(trans->op_mode);
1694 		wake_up(&trans->wait_command_queue);
1695 		return;
1696 	}
1697 
1698 	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1699 		if (!trans->txqs.txq[i])
1700 			continue;
1701 		del_timer(&trans->txqs.txq[i]->stuck_timer);
1702 	}
1703 
1704 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
1705 	 * before we wake up the command caller, to ensure a proper cleanup. */
1706 	iwl_trans_fw_error(trans, false);
1707 
1708 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1709 	wake_up(&trans->wait_command_queue);
1710 }
1711 
1712 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1713 {
1714 	u32 inta;
1715 
1716 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1717 
1718 	trace_iwlwifi_dev_irq(trans->dev);
1719 
1720 	/* Discover which interrupts are active/pending */
1721 	inta = iwl_read32(trans, CSR_INT);
1722 
1723 	/* the thread will service interrupts and re-enable them */
1724 	return inta;
1725 }
1726 
1727 /* a device (PCI-E) page is 4096 bytes long */
1728 #define ICT_SHIFT	12
1729 #define ICT_SIZE	(1 << ICT_SHIFT)
1730 #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
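/* i.e. 1024 32-bit entries; ict_index wraps around using (ICT_COUNT - 1) */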
1731 
1732 /* Interrupt handler using the ICT table. With this mechanism the driver
1733  * stops reading the INTA register to learn the device's interrupt causes,
1734  * since reading that register is expensive. Instead, the device writes the
1735  * causes into the ICT table in DRAM, increments its index and fires an
1736  * interrupt; the driver then ORs all ICT table entries from the current
1737  * index up to the first entry with a 0 value, which yields the causes to
1738  * service, zeroes the consumed entries and updates the index.
1739  */
1740 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1741 {
1742 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1743 	u32 inta;
1744 	u32 val = 0;
1745 	u32 read;
1746 
1747 	trace_iwlwifi_dev_irq(trans->dev);
1748 
1749 	/* Ignore interrupt if there's nothing in NIC to service.
1750 	 * This may be due to IRQ shared with another device,
1751 	 * or due to sporadic interrupts thrown from our NIC. */
1752 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1753 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1754 	if (!read)
1755 		return 0;
1756 
1757 	/*
1758 	 * Collect all entries up to the first 0, starting from ict_index;
1759 	 * note we already read at ict_index.
1760 	 */
1761 	do {
1762 		val |= read;
1763 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1764 				trans_pcie->ict_index, read);
1765 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1766 		trans_pcie->ict_index =
1767 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1768 
1769 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1770 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1771 					   read);
1772 	} while (read);
1773 
1774 	/* We should not get this value, just ignore it. */
1775 	if (val == 0xffffffff)
1776 		val = 0;
1777 
1778 	/*
1779 	 * This is a W/A for a HW bug: the bug may cause the Rx bit
1780 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1781 	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
1782 	 * so we use them to decide on the real state of the Rx bit.
1783 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1784 	 */
1785 	if (val & 0xC0000)
1786 		val |= 0x8000;
1787 
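	/*
	 * The ICT entry stores the causes in a compressed layout: the low
	 * byte maps to CSR_INT bits 0-7 and the next byte maps to bits
	 * 24-31, so expand it back to the CSR_INT bit layout here.
	 */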
1788 	inta = (0xff & val) | ((0xff00 & val) << 16);
1789 	return inta;
1790 }
1791 
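/*
 * Handle an RF-kill interrupt: latch the current HW rfkill state and,
 * if the state reported to the op-mode changed, notify it.
 */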
1792 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1793 {
1794 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1795 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1796 	bool hw_rfkill, prev, report;
1797 
1798 	mutex_lock(&trans_pcie->mutex);
1799 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1800 	hw_rfkill = iwl_is_rfkill_set(trans);
1801 	if (hw_rfkill) {
1802 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1803 		set_bit(STATUS_RFKILL_HW, &trans->status);
1804 	}
1805 	if (trans_pcie->opmode_down)
1806 		report = hw_rfkill;
1807 	else
1808 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1809 
1810 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1811 		 hw_rfkill ? "disable radio" : "enable radio");
1812 
1813 	isr_stats->rfkill++;
1814 
1815 	if (prev != report)
1816 		iwl_trans_pcie_rf_kill(trans, report);
1817 	mutex_unlock(&trans_pcie->mutex);
1818 
1819 	if (hw_rfkill) {
1820 		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1821 				       &trans->status))
1822 			IWL_DEBUG_RF_KILL(trans,
1823 					  "Rfkill while SYNC HCMD in flight\n");
1824 		wake_up(&trans->wait_command_queue);
1825 	} else {
1826 		clear_bit(STATUS_RFKILL_HW, &trans->status);
1827 		if (trans_pcie->opmode_down)
1828 			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1829 	}
1830 }
1831 
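/*
 * Threaded handler for the legacy (non-MSI-X) interrupt: reads the causes
 * (via the ICT table when enabled), acks them and services each bit.
 */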
1832 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1833 {
1834 	struct iwl_trans *trans = dev_id;
1835 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1836 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1837 	u32 inta = 0;
1838 	u32 handled = 0;
1839 	bool polling = false;
1840 
1841 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1842 
1843 	spin_lock_bh(&trans_pcie->irq_lock);
1844 
1845 	/* If the DRAM interrupt table is not set yet,
1846 	 * use the legacy interrupt path.
1847 	 */
1848 	if (likely(trans_pcie->use_ict))
1849 		inta = iwl_pcie_int_cause_ict(trans);
1850 	else
1851 		inta = iwl_pcie_int_cause_non_ict(trans);
1852 
1853 #ifdef CONFIG_IWLWIFI_DEBUG
1854 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1855 		IWL_DEBUG_ISR(trans,
1856 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1857 			      inta, trans_pcie->inta_mask,
1858 			      iwl_read32(trans, CSR_INT_MASK),
1859 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1860 		if (inta & (~trans_pcie->inta_mask))
1861 			IWL_DEBUG_ISR(trans,
1862 				      "We got a masked interrupt (0x%08x)\n",
1863 				      inta & (~trans_pcie->inta_mask));
1864 	}
1865 #endif
1866 
1867 	inta &= trans_pcie->inta_mask;
1868 
1869 	/*
1870 	 * Ignore interrupt if there's nothing in NIC to service.
1871 	 * This may be due to IRQ shared with another device,
1872 	 * or due to sporadic interrupts thrown from our NIC.
1873 	 */
1874 	if (unlikely(!inta)) {
1875 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1876 		/*
1877 		 * Re-enable interrupts here since we don't
1878 		 * have anything to service
1879 		 */
1880 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1881 			_iwl_enable_interrupts(trans);
1882 		spin_unlock_bh(&trans_pcie->irq_lock);
1883 		lock_map_release(&trans->sync_cmd_lockdep_map);
1884 		return IRQ_NONE;
1885 	}
1886 
1887 	if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
1888 		/*
1889 		 * Hardware disappeared. It might have
1890 		 * already raised an interrupt.
1891 		 */
1892 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1893 		spin_unlock_bh(&trans_pcie->irq_lock);
1894 		goto out;
1895 	}
1896 
1897 	/* Ack/clear/reset pending uCode interrupts.
1898 	 * Note: some bits in CSR_INT are an "OR" of bits in CSR_FH_INT_STATUS.
1899 	 */
1900 	/* There is a hardware bug in the interrupt mask function: some
1901 	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1902 	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
1903 	 * ICT interrupt handling mechanism has another bug that might cause
1904 	 * these unmasked interrupts to go undetected. We work around the
1905 	 * hardware bugs here by ACKing all the possible interrupts so that
1906 	 * interrupt coalescing can still be achieved.
1907 	 */
1908 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1909 
1910 #ifdef CONFIG_IWLWIFI_DEBUG
1911 	if (iwl_have_debug_level(IWL_DL_ISR))
1912 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1913 			      inta, iwl_read32(trans, CSR_INT_MASK));
1914 #endif
1915 
1916 	spin_unlock_bh(&trans_pcie->irq_lock);
1917 
1918 	/* Now service all interrupt bits discovered above. */
1919 	if (inta & CSR_INT_BIT_HW_ERR) {
1920 		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1921 
1922 		/* Tell the device to stop sending interrupts */
1923 		iwl_disable_interrupts(trans);
1924 
1925 		isr_stats->hw++;
1926 		iwl_pcie_irq_handle_error(trans);
1927 
1928 		handled |= CSR_INT_BIT_HW_ERR;
1929 
1930 		goto out;
1931 	}
1932 
1933 	/* NIC fires this, but we don't use it; it's redundant with WAKEUP */
1934 	if (inta & CSR_INT_BIT_SCD) {
1935 		IWL_DEBUG_ISR(trans,
1936 			      "Scheduler finished transmitting the frame/frames.\n");
1937 		isr_stats->sch++;
1938 	}
1939 
1940 	/* Alive notification via Rx interrupt will do the real work */
1941 	if (inta & CSR_INT_BIT_ALIVE) {
1942 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1943 		isr_stats->alive++;
1944 		if (trans->trans_cfg->gen2) {
1945 			/*
1946 			 * We can restock, since firmware configured
1947 			 * the RFH
1948 			 */
1949 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1950 		}
1951 
1952 		handled |= CSR_INT_BIT_ALIVE;
1953 	}
1954 
1955 	/* Safely ignore these bits for debug checks below */
1956 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1957 
1958 	/* HW RF KILL switch toggled */
1959 	if (inta & CSR_INT_BIT_RF_KILL) {
1960 		iwl_pcie_handle_rfkill_irq(trans);
1961 		handled |= CSR_INT_BIT_RF_KILL;
1962 	}
1963 
1964 	/* Chip got too hot and stopped itself */
1965 	if (inta & CSR_INT_BIT_CT_KILL) {
1966 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1967 		isr_stats->ctkill++;
1968 		handled |= CSR_INT_BIT_CT_KILL;
1969 	}
1970 
1971 	/* Error detected by uCode */
1972 	if (inta & CSR_INT_BIT_SW_ERR) {
1973 		IWL_ERR(trans, "Microcode SW error detected. "
1974 			"Restarting 0x%X.\n", inta);
1975 		isr_stats->sw++;
1976 		iwl_pcie_irq_handle_error(trans);
1977 		handled |= CSR_INT_BIT_SW_ERR;
1978 	}
1979 
1980 	/* uCode wakes up after power-down sleep */
1981 	if (inta & CSR_INT_BIT_WAKEUP) {
1982 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1983 		iwl_pcie_rxq_check_wrptr(trans);
1984 		iwl_pcie_txq_check_wrptrs(trans);
1985 
1986 		isr_stats->wakeup++;
1987 
1988 		handled |= CSR_INT_BIT_WAKEUP;
1989 	}
1990 
1991 	/* All uCode command responses, including Tx command responses,
1992 	 * Rx "responses" (frame-received notification), and other
1993 	 * notifications from uCode come through here. */
1994 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1995 		    CSR_INT_BIT_RX_PERIODIC)) {
1996 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1997 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1998 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1999 			iwl_write32(trans, CSR_FH_INT_STATUS,
2000 					CSR_FH_INT_RX_MASK);
2001 		}
2002 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
2003 			handled |= CSR_INT_BIT_RX_PERIODIC;
2004 			iwl_write32(trans,
2005 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
2006 		}
2007 		/* Sending an RX interrupt requires many steps to be done in the
2008 		 * device:
2009 		 * 1- write interrupt to current index in ICT table.
2010 		 * 2- DMA the RX frame.
2011 		 * 3- update RX shared data to indicate the last write index.
2012 		 * 4- send interrupt.
2013 		 * This could lead to an RX race: the driver could receive the RX
2014 		 * interrupt before the shared data changes are visible to it;
2015 		 * the periodic interrupt will detect any dangling Rx activity.
2016 		 */
2017 
2018 		/* Disable periodic interrupt; we use it as just a one-shot. */
2019 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
2020 			    CSR_INT_PERIODIC_DIS);
2021 
2022 		/*
2023 		 * Enable periodic interrupt in 8 msec only if we received
2024 		 * real RX interrupt (instead of just periodic int), to catch
2025 		 * any dangling Rx interrupt.  If it was just the periodic
2026 		 * interrupt, there was no dangling Rx activity, and no need
2027 		 * to extend the periodic interrupt; one-shot is enough.
2028 		 */
2029 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
2030 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
2031 				   CSR_INT_PERIODIC_ENA);
2032 
2033 		isr_stats->rx++;
2034 
2035 		local_bh_disable();
2036 		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2037 			polling = true;
2038 			__napi_schedule(&trans_pcie->rxq[0].napi);
2039 		}
2040 		local_bh_enable();
2041 	}
2042 
2043 	/* This "Tx" DMA channel is used only for loading uCode */
2044 	if (inta & CSR_INT_BIT_FH_TX) {
2045 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
2046 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2047 		isr_stats->tx++;
2048 		handled |= CSR_INT_BIT_FH_TX;
2049 		/* Wake up uCode load routine, now that load is complete */
2050 		trans_pcie->ucode_write_complete = true;
2051 		wake_up(&trans_pcie->ucode_write_waitq);
2052 		/* Wake up IMR write routine, now that write to SRAM is complete */
2053 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2054 			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2055 			wake_up(&trans_pcie->ucode_write_waitq);
2056 		}
2057 	}
2058 
2059 	if (inta & ~handled) {
2060 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
2061 		isr_stats->unhandled++;
2062 	}
2063 
2064 	if (inta & ~(trans_pcie->inta_mask)) {
2065 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
2066 			 inta & ~trans_pcie->inta_mask);
2067 	}
2068 
2069 	if (!polling) {
2070 		spin_lock_bh(&trans_pcie->irq_lock);
2071 		/* Only re-enable all interrupts if they were disabled by the IRQ */
2072 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
2073 			_iwl_enable_interrupts(trans);
2074 		/* we are loading the firmware, enable FH_TX interrupt only */
2075 		else if (handled & CSR_INT_BIT_FH_TX)
2076 			iwl_enable_fw_load_int(trans);
2077 		/* Re-enable RF_KILL if it occurred */
2078 		else if (handled & CSR_INT_BIT_RF_KILL)
2079 			iwl_enable_rfkill_int(trans);
2080 		/* Re-enable the ALIVE / Rx interrupt if it occurred */
2081 		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
2082 			iwl_enable_fw_load_int_ctx_info(trans);
2083 		spin_unlock_bh(&trans_pcie->irq_lock);
2084 	}
2085 
2086 out:
2087 	lock_map_release(&trans->sync_cmd_lockdep_map);
2088 	return IRQ_HANDLED;
2089 }
2090 
2091 /******************************************************************************
2092  *
2093  * ICT functions
2094  *
2095  ******************************************************************************/
2096 
2097 /* Free dram table */
2098 void iwl_pcie_free_ict(struct iwl_trans *trans)
2099 {
2100 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2101 
2102 	if (trans_pcie->ict_tbl) {
2103 		dma_free_coherent(trans->dev, ICT_SIZE,
2104 				  trans_pcie->ict_tbl,
2105 				  trans_pcie->ict_tbl_dma);
2106 		trans_pcie->ict_tbl = NULL;
2107 		trans_pcie->ict_tbl_dma = 0;
2108 	}
2109 }
2110 
2111 /*
2112  * Allocate the DRAM shared table; it is an aligned memory
2113  * block of ICT_SIZE.
2114  * Also reset all data related to the ICT table interrupt.
2115  */
2116 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2117 {
2118 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2119 
2120 	trans_pcie->ict_tbl =
2121 		dma_alloc_coherent(trans->dev, ICT_SIZE,
2122 				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2123 	if (!trans_pcie->ict_tbl)
2124 		return -ENOMEM;
2125 
2126 	/* just an API sanity check ... it is guaranteed to be aligned */
2127 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2128 		iwl_pcie_free_ict(trans);
2129 		return -EINVAL;
2130 	}
2131 
2132 	return 0;
2133 }
2134 
2135 /* Device is going up: inform it that we are using the ICT interrupt table,
2136  * and tell the driver to start using ICT interrupts.
2137  */
2138 void iwl_pcie_reset_ict(struct iwl_trans *trans)
2139 {
2140 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2141 	u32 val;
2142 
2143 	if (!trans_pcie->ict_tbl)
2144 		return;
2145 
2146 	spin_lock_bh(&trans_pcie->irq_lock);
2147 	_iwl_disable_interrupts(trans);
2148 
2149 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2150 
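	/*
	 * The register takes the table's DRAM address in 4KB units (hence
	 * the ICT_SHIFT), plus the enable / wrap-check / write-pointer
	 * control bits.
	 */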
2151 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2152 
2153 	val |= CSR_DRAM_INT_TBL_ENABLE |
2154 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
2155 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
2156 
2157 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2158 
2159 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2160 	trans_pcie->use_ict = true;
2161 	trans_pcie->ict_index = 0;
2162 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2163 	_iwl_enable_interrupts(trans);
2164 	spin_unlock_bh(&trans_pcie->irq_lock);
2165 }
2166 
2167 /* Device is going down: disable ICT interrupt usage */
2168 void iwl_pcie_disable_ict(struct iwl_trans *trans)
2169 {
2170 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2171 
2172 	spin_lock_bh(&trans_pcie->irq_lock);
2173 	trans_pcie->use_ict = false;
2174 	spin_unlock_bh(&trans_pcie->irq_lock);
2175 }
2176 
2177 irqreturn_t iwl_pcie_isr(int irq, void *data)
2178 {
2179 	struct iwl_trans *trans = data;
2180 
2181 	if (!trans)
2182 		return IRQ_NONE;
2183 
2184 	/* Disable (but don't clear!) interrupts here to avoid
2185 	 * back-to-back ISRs and sporadic interrupts from our NIC.
2186 	 * If we have something to service, the tasklet will re-enable ints.
2187 	 * If we *don't* have something, we'll re-enable before leaving here.
2188 	 */
2189 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2190 
2191 	return IRQ_WAKE_THREAD;
2192 }
2193 
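/* MSI-X hard-IRQ handler: just wake the matching threaded handler */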
2194 irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2195 {
2196 	return IRQ_WAKE_THREAD;
2197 }
2198 
2199 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2200 {
2201 	struct msix_entry *entry = dev_id;
2202 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2203 	struct iwl_trans *trans = trans_pcie->trans;
2204 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2205 	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
2206 	u32 inta_fh, inta_hw;
2207 	bool polling = false;
2208 	bool sw_err;
2209 
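	/*
	 * Only ack the FH causes this vector owns: mask out the per-queue
	 * data causes by default, and add Q0/Q1 back when this vector is
	 * shared with the non-RX or first-RSS queue.
	 */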
2210 	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2211 		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
2212 
2213 	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2214 		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
2215 
2216 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
2217 
2218 	spin_lock_bh(&trans_pcie->irq_lock);
2219 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2220 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2221 	/*
2222 	 * Clear the causes registers to avoid handling the same cause twice.
2223 	 */
2224 	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
2225 	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2226 	spin_unlock_bh(&trans_pcie->irq_lock);
2227 
2228 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2229 
2230 	if (unlikely(!(inta_fh | inta_hw))) {
2231 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2232 		lock_map_release(&trans->sync_cmd_lockdep_map);
2233 		return IRQ_NONE;
2234 	}
2235 
2236 #ifdef CONFIG_IWLWIFI_DEBUG
2237 	if (iwl_have_debug_level(IWL_DL_ISR)) {
2238 		IWL_DEBUG_ISR(trans,
2239 			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2240 			      entry->entry, inta_fh, trans_pcie->fh_mask,
2241 			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2242 		if (inta_fh & ~trans_pcie->fh_mask)
2243 			IWL_DEBUG_ISR(trans,
2244 				      "We got a masked interrupt (0x%08x)\n",
2245 				      inta_fh & ~trans_pcie->fh_mask);
2246 	}
2247 #endif
2248 
2249 	inta_fh &= trans_pcie->fh_mask;
2250 
2251 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2252 	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2253 		local_bh_disable();
2254 		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2255 			polling = true;
2256 			__napi_schedule(&trans_pcie->rxq[0].napi);
2257 		}
2258 		local_bh_enable();
2259 	}
2260 
2261 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2262 	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2263 		local_bh_disable();
2264 		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2265 			polling = true;
2266 			__napi_schedule(&trans_pcie->rxq[1].napi);
2267 		}
2268 		local_bh_enable();
2269 	}
2270 
2271 	/* This "Tx" DMA channel is used only for loading uCode */
2272 	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
2273 	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2274 		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
2275 		isr_stats->tx++;
2276 
2277 		/* Wake up IMR routine once write to SRAM is complete */
2278 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2279 			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2280 			wake_up(&trans_pcie->ucode_write_waitq);
2281 		}
2282 	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2283 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2284 		isr_stats->tx++;
2285 		/*
2286 		 * Wake up uCode load routine,
2287 		 * now that load is complete
2288 		 */
2289 		trans_pcie->ucode_write_complete = true;
2290 		wake_up(&trans_pcie->ucode_write_waitq);
2291 
2292 		/* Wake up IMR routine once write to SRAM is complete */
2293 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2294 			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2295 			wake_up(&trans_pcie->ucode_write_waitq);
2296 		}
2297 	}
2298 
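	/* The SW error cause bit lives in a different position on Bz and later */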
2299 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2300 		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
2301 	else
2302 		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
2303 
2304 	/* Error detected by uCode */
2305 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
2306 		IWL_ERR(trans,
2307 			"Microcode SW error detected. Restarting 0x%X.\n",
2308 			inta_fh);
2309 		isr_stats->sw++;
2310 		/* During the FW reset flow, report errors from there */
2311 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2312 			trans_pcie->imr_status = IMR_D2S_ERROR;
2313 			wake_up(&trans_pcie->imr_waitq);
2314 		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2315 			trans_pcie->fw_reset_state = FW_RESET_ERROR;
2316 			wake_up(&trans_pcie->fw_reset_waitq);
2317 		} else {
2318 			iwl_pcie_irq_handle_error(trans);
2319 		}
2320 	}
2321 
2322 	/* After checking FH register check HW register */
2323 #ifdef CONFIG_IWLWIFI_DEBUG
2324 	if (iwl_have_debug_level(IWL_DL_ISR)) {
2325 		IWL_DEBUG_ISR(trans,
2326 			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2327 			      entry->entry, inta_hw, trans_pcie->hw_mask,
2328 			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2329 		if (inta_hw & ~trans_pcie->hw_mask)
2330 			IWL_DEBUG_ISR(trans,
2331 				      "We got a masked interrupt 0x%08x\n",
2332 				      inta_hw & ~trans_pcie->hw_mask);
2333 	}
2334 #endif
2335 
2336 	inta_hw &= trans_pcie->hw_mask;
2337 
2338 	/* Alive notification via Rx interrupt will do the real work */
2339 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2340 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2341 		isr_stats->alive++;
2342 		if (trans->trans_cfg->gen2) {
2343 			/* We can restock, since firmware configured the RFH */
2344 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2345 		}
2346 	}
2347 
2348 	/*
2349 	 * In some rare cases when the HW is in a bad state, we may
2350 	 * get this interrupt too early, when prph_info is still NULL.
2351 	 * So make sure that it's not NULL to prevent crashing.
2352 	 */
2353 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
2354 		u32 sleep_notif =
2355 			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2356 		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
2357 		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
2358 			IWL_DEBUG_ISR(trans,
2359 				      "Sx interrupt: sleep notification = 0x%x\n",
2360 				      sleep_notif);
2361 			trans_pcie->sx_complete = true;
2362 			wake_up(&trans_pcie->sx_waitq);
2363 		} else {
2364 			/* uCode wakes up after power-down sleep */
2365 			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2366 			iwl_pcie_rxq_check_wrptr(trans);
2367 			iwl_pcie_txq_check_wrptrs(trans);
2368 
2369 			isr_stats->wakeup++;
2370 		}
2371 	}
2372 
2373 	/* Chip got too hot and stopped itself */
2374 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2375 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
2376 		isr_stats->ctkill++;
2377 	}
2378 
2379 	/* HW RF KILL switch toggled */
2380 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2381 		iwl_pcie_handle_rfkill_irq(trans);
2382 
2383 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2384 		IWL_ERR(trans,
2385 			"Hardware error detected. Restarting.\n");
2386 
2387 		isr_stats->hw++;
2388 		trans->dbg.hw_error = true;
2389 		iwl_pcie_irq_handle_error(trans);
2390 	}
2391 
2392 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
2393 		IWL_DEBUG_ISR(trans, "Reset flow completed\n");
2394 		trans_pcie->fw_reset_state = FW_RESET_OK;
2395 		wake_up(&trans_pcie->fw_reset_waitq);
2396 	}
2397 
2398 	if (!polling)
2399 		iwl_pcie_clear_irq(trans, entry->entry);
2400 
2401 	lock_map_release(&trans->sync_cmd_lockdep_map);
2402 
2403 	return IRQ_HANDLED;
2404 }
2405