xref: /freebsd/sys/contrib/dev/iwlwifi/pcie/tx.c (revision 9af1bba4)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation
4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5  * Copyright (C) 2016-2017 Intel Deutschland GmbH
6  */
7 #include <linux/etherdevice.h>
8 #include <linux/ieee80211.h>
9 #include <linux/slab.h>
10 #include <linux/sched.h>
11 #ifdef CONFIG_INET
12 #include <net/ip6_checksum.h>
13 #include <net/tso.h>
14 #endif
15 #if defined(__FreeBSD__)
16 #include <net/mac80211.h>
17 #endif
18 
19 #include "iwl-debug.h"
20 #include "iwl-csr.h"
21 #include "iwl-prph.h"
22 #include "iwl-io.h"
23 #include "iwl-scd.h"
24 #include "iwl-op-mode.h"
25 #include "internal.h"
26 #include "fw/api/tx.h"
27 
28 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
29  * DMA services
30  *
31  * Theory of operation
32  *
33  * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
34  * of buffer descriptors, each of which points to one or more data buffers for
35  * the device to read from or fill.  Driver and device exchange status of each
36  * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
37  * entries in each circular buffer, to protect against confusing empty and full
38  * queue states.
39  *
40  * The device reads or writes the data in the queues via the device's several
41  * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
42  *
43  * For the Tx queue, there are low mark and high mark limits.  If, after queuing
44  * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.  When
45  * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
46  * the Tx queue is resumed.
47  *
48  ***************************************************/
49 
50 
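/*
 * Illustrative sketch (not part of the driver): one way to derive free
 * space in such a circular buffer from the "read" and "write" pointers,
 * assuming the queue size is a power of two and two entries are always
 * kept unused so that an empty ring and a full ring never look the same.
 * The helper name below is made up; the real accounting is done by the
 * shared queue code (see the iwl_txq_space() calls later in this file).
 */
#if 0	/* example only, never compiled */
static inline u32 example_ring_free(u32 read, u32 write, u32 size)
{
	u32 used = (write - read) & (size - 1);	/* entries in flight */

	/* at most size - 2 entries may ever be in use */
	return (used > size - 2) ? 0 : (size - 2) - used;
}
#endif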
51 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
52 			   struct iwl_dma_ptr *ptr, size_t size)
53 {
54 	if (WARN_ON(ptr->addr))
55 		return -EINVAL;
56 
57 	ptr->addr = dma_alloc_coherent(trans->dev, size,
58 				       &ptr->dma, GFP_KERNEL);
59 	if (!ptr->addr)
60 		return -ENOMEM;
61 	ptr->size = size;
62 	return 0;
63 }
64 
65 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
66 {
67 	if (unlikely(!ptr->addr))
68 		return;
69 
70 	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
71 	memset(ptr, 0, sizeof(*ptr));
72 }
73 
74 /*
75  * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
76  */
77 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
78 				    struct iwl_txq *txq)
79 {
80 	u32 reg = 0;
81 	int txq_id = txq->id;
82 
83 	lockdep_assert_held(&txq->lock);
84 
85 	/*
86 	 * explicitly wake up the NIC if:
87 	 * 1. shadow registers aren't enabled
88 	 * 2. NIC is woken up for CMD regardless of shadow outside this function
89 	 * 3. there is a chance that the NIC is asleep
90 	 */
91 	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
92 	    txq_id != trans->txqs.cmd.q_id &&
93 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
94 		/*
95 		 * wake up nic if it's powered down ...
96 		 * uCode will wake up, and interrupt us again, so next
97 		 * time we'll skip this part.
98 		 */
99 		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
100 
101 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
102 			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
103 				       txq_id, reg);
104 			iwl_set_bit(trans, CSR_GP_CNTRL,
105 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
106 			txq->need_update = true;
107 			return;
108 		}
109 	}
110 
111 	/*
112 	 * if not in power-save mode, uCode will never sleep when we're
113 	 * trying to tx (during RFKILL, we're not trying to tx).
114 	 */
115 	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
116 	if (!txq->block)
117 		iwl_write32(trans, HBUS_TARG_WRPTR,
118 			    txq->write_ptr | (txq_id << 8));
119 }
120 
121 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
122 {
123 	int i;
124 
125 	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
126 		struct iwl_txq *txq = trans->txqs.txq[i];
127 
128 		if (!test_bit(i, trans->txqs.queue_used))
129 			continue;
130 
131 		spin_lock_bh(&txq->lock);
132 		if (txq->need_update) {
133 			iwl_pcie_txq_inc_wr_ptr(trans, txq);
134 			txq->need_update = false;
135 		}
136 		spin_unlock_bh(&txq->lock);
137 	}
138 }
139 
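/*
 * Fill transfer buffer (TB) entry 'idx' of a TFD: the low 32 bits of the
 * DMA address go into 'lo', while 'hi_n_len' packs the remaining high
 * address bits into its low nibble and the buffer length into the upper
 * 12 bits (hence the 'len << 4' below).
 */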
140 static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
141 				       u8 idx, dma_addr_t addr, u16 len)
142 {
143 	struct iwl_tfd *tfd_fh = (void *)tfd;
144 	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
145 
146 	u16 hi_n_len = len << 4;
147 
148 	put_unaligned_le32(addr, &tb->lo);
149 	hi_n_len |= iwl_get_dma_hi_addr(addr);
150 
151 	tb->hi_n_len = cpu_to_le16(hi_n_len);
152 
153 	tfd_fh->num_tbs = idx + 1;
154 }
155 
156 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
157 				  dma_addr_t addr, u16 len, bool reset)
158 {
159 	void *tfd;
160 	u32 num_tbs;
161 
162 	tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
163 
164 	if (reset)
165 		memset(tfd, 0, trans->txqs.tfd.size);
166 
167 	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
168 
169 	/* Each TFD can point to a maximum max_tbs Tx buffers */
170 	if (num_tbs >= trans->txqs.tfd.max_tbs) {
171 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
172 			trans->txqs.tfd.max_tbs);
173 		return -EINVAL;
174 	}
175 
176 	if (WARN(addr & ~IWL_TX_DMA_MASK,
177 		 "Unaligned address = %llx\n", (unsigned long long)addr))
178 		return -EINVAL;
179 
180 	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
181 
182 	return num_tbs;
183 }
184 
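/*
 * Counterpart of iwl_pcie_set_cmd_in_flight(): once the command queue has
 * drained, drop the MAC access request that kept the NIC awake.  Only
 * needed on devices that require the APMG wake-up workaround.
 */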
185 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
186 {
187 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
188 
189 	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
190 		return;
191 
192 	spin_lock(&trans_pcie->reg_lock);
193 
194 	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
195 		spin_unlock(&trans_pcie->reg_lock);
196 		return;
197 	}
198 
199 	trans_pcie->cmd_hold_nic_awake = false;
200 	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
201 				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
202 	spin_unlock(&trans_pcie->reg_lock);
203 }
204 
205 /*
206  * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
207  */
208 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
209 {
210 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
211 
212 	if (!txq) {
213 		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
214 		return;
215 	}
216 
217 	spin_lock_bh(&txq->lock);
218 	while (txq->write_ptr != txq->read_ptr) {
219 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
220 				   txq_id, txq->read_ptr);
221 
222 		if (txq_id != trans->txqs.cmd.q_id) {
223 			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
224 
225 			if (WARN_ON_ONCE(!skb))
226 				continue;
227 
228 			iwl_txq_free_tso_page(trans, skb);
229 		}
230 		iwl_txq_free_tfd(trans, txq);
231 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
232 
233 		if (txq->read_ptr == txq->write_ptr &&
234 		    txq_id == trans->txqs.cmd.q_id)
235 			iwl_pcie_clear_cmd_in_flight(trans);
236 	}
237 
238 	while (!skb_queue_empty(&txq->overflow_q)) {
239 		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
240 
241 		iwl_op_mode_free_skb(trans->op_mode, skb);
242 	}
243 
244 	spin_unlock_bh(&txq->lock);
245 
246 	/* just in case - this queue may have been stopped */
247 	iwl_wake_queue(trans, txq);
248 }
249 
250 /*
251  * iwl_pcie_txq_free - Deallocate DMA queue.
252  * @txq: Transmit queue to deallocate.
253  *
254  * Empty queue by removing and destroying all BD's.
255  * Free all buffers.
256  * 0-fill, but do not free "txq" descriptor structure.
257  */
258 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
259 {
260 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
261 	struct device *dev = trans->dev;
262 	int i;
263 
264 	if (WARN_ON(!txq))
265 		return;
266 
267 	iwl_pcie_txq_unmap(trans, txq_id);
268 
269 	/* De-alloc array of command/tx buffers */
270 	if (txq_id == trans->txqs.cmd.q_id)
271 		for (i = 0; i < txq->n_window; i++) {
272 			kfree_sensitive(txq->entries[i].cmd);
273 			kfree_sensitive(txq->entries[i].free_buf);
274 		}
275 
276 	/* De-alloc circular buffer of TFDs */
277 	if (txq->tfds) {
278 		dma_free_coherent(dev,
279 				  trans->txqs.tfd.size *
280 				  trans->trans_cfg->base_params->max_tfd_queue_size,
281 				  txq->tfds, txq->dma_addr);
282 		txq->dma_addr = 0;
283 		txq->tfds = NULL;
284 
285 		dma_free_coherent(dev,
286 				  sizeof(*txq->first_tb_bufs) * txq->n_window,
287 				  txq->first_tb_bufs, txq->first_tb_dma);
288 	}
289 
290 	kfree(txq->entries);
291 	txq->entries = NULL;
292 
293 	del_timer_sync(&txq->stuck_timer);
294 
295 	/* 0-fill queue descriptor structure */
296 	memset(txq, 0, sizeof(*txq));
297 }
298 
299 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
300 {
301 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
302 	int nq = trans->trans_cfg->base_params->num_of_queues;
303 	int chan;
304 	u32 reg_val;
305 	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
306 				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
307 
308 	/* make sure all queues are not stopped/used */
309 	memset(trans->txqs.queue_stopped, 0,
310 	       sizeof(trans->txqs.queue_stopped));
311 	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
312 
313 	trans_pcie->scd_base_addr =
314 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
315 
316 	WARN_ON(scd_base_addr != 0 &&
317 		scd_base_addr != trans_pcie->scd_base_addr);
318 
319 	/* reset context data, TX status and translation data */
320 	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
321 				   SCD_CONTEXT_MEM_LOWER_BOUND,
322 			    NULL, clear_dwords);
323 
324 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
325 		       trans->txqs.scd_bc_tbls.dma >> 10);
326 
327 	/* The chain extension of the SCD doesn't work well. This feature is
328 	 * enabled by default by the HW, so we need to disable it manually.
329 	 */
330 	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
331 		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
332 
333 	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
334 				trans->txqs.cmd.fifo,
335 				trans->txqs.cmd.wdg_timeout);
336 
337 	/* Activate all Tx DMA/FIFO channels */
338 	iwl_scd_activate_fifos(trans);
339 
340 	/* Enable DMA channel */
341 	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
342 		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
343 				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
344 				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
345 
346 	/* Update FH chicken bits */
347 	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
348 	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
349 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
350 
351 	/* Enable L1-Active */
352 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
353 		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
354 				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
355 }
356 
357 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
358 {
359 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
360 	int txq_id;
361 
362 	/*
363 	 * we should never get here in gen2 trans mode; return early to avoid
364 	 * having invalid accesses
365 	 */
366 	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
367 		return;
368 
369 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
370 	     txq_id++) {
371 		struct iwl_txq *txq = trans->txqs.txq[txq_id];
372 		if (trans->trans_cfg->gen2)
373 			iwl_write_direct64(trans,
374 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
375 					   txq->dma_addr);
376 		else
377 			iwl_write_direct32(trans,
378 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
379 					   txq->dma_addr >> 8);
380 		iwl_pcie_txq_unmap(trans, txq_id);
381 		txq->read_ptr = 0;
382 		txq->write_ptr = 0;
383 	}
384 
385 	/* Tell NIC where to find the "keep warm" buffer */
386 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
387 			   trans_pcie->kw.dma >> 4);
388 
389 	/*
390 	 * Send 0 as the scd_base_addr since the device may have been reset
391 	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
392 	 * contain garbage.
393 	 */
394 	iwl_pcie_tx_start(trans, 0);
395 }
396 
397 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
398 {
399 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
400 	int ch, ret;
401 	u32 mask = 0;
402 
403 	spin_lock_bh(&trans_pcie->irq_lock);
404 
405 	if (!iwl_trans_grab_nic_access(trans))
406 		goto out;
407 
408 	/* Stop each Tx DMA channel */
409 	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
410 		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
411 		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
412 	}
413 
414 	/* Wait for DMA channels to be idle */
415 	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
416 	if (ret < 0)
417 		IWL_ERR(trans,
418 			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
419 			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
420 
421 	iwl_trans_release_nic_access(trans);
422 
423 out:
424 	spin_unlock_bh(&trans_pcie->irq_lock);
425 }
426 
427 /*
428  * iwl_pcie_tx_stop - Stop all Tx DMA channels
429  */
430 int iwl_pcie_tx_stop(struct iwl_trans *trans)
431 {
432 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
433 	int txq_id;
434 
435 	/* Turn off all Tx DMA fifos */
436 	iwl_scd_deactivate_fifos(trans);
437 
438 	/* Turn off all Tx DMA channels */
439 	iwl_pcie_tx_stop_fh(trans);
440 
441 	/*
442 	 * This function can be called before the op_mode disabled the
443 	 * queues. This happens when we have an rfkill interrupt.
444 	 * Since we stop Tx altogether - mark the queues as stopped.
445 	 */
446 	memset(trans->txqs.queue_stopped, 0,
447 	       sizeof(trans->txqs.queue_stopped));
448 	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
449 
450 	/* This can happen: start_hw, stop_device */
451 	if (!trans_pcie->txq_memory)
452 		return 0;
453 
454 	/* Unmap DMA from host system and free skb's */
455 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
456 	     txq_id++)
457 		iwl_pcie_txq_unmap(trans, txq_id);
458 
459 	return 0;
460 }
461 
462 /*
463  * iwl_trans_tx_free - Free TXQ Context
464  *
465  * Destroy all TX DMA queues and structures
466  */
467 void iwl_pcie_tx_free(struct iwl_trans *trans)
468 {
469 	int txq_id;
470 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
471 
472 	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
473 
474 	/* Tx queues */
475 	if (trans_pcie->txq_memory) {
476 		for (txq_id = 0;
477 		     txq_id < trans->trans_cfg->base_params->num_of_queues;
478 		     txq_id++) {
479 			iwl_pcie_txq_free(trans, txq_id);
480 			trans->txqs.txq[txq_id] = NULL;
481 		}
482 	}
483 
484 	kfree(trans_pcie->txq_memory);
485 	trans_pcie->txq_memory = NULL;
486 
487 	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
488 
489 	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
490 }
491 
492 /*
493  * iwl_pcie_tx_alloc - allocate TX context
494  * Allocate all Tx DMA structures and initialize them
495  */
496 static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
497 {
498 	int ret;
499 	int txq_id, slots_num;
500 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
501 	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
502 
503 	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
504 		return -EINVAL;
505 
506 	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
507 
508 	/* It is not allowed to alloc twice, so warn when this happens.
509 	 * We cannot rely on the previous allocation, so free and fail. */
510 	if (WARN_ON(trans_pcie->txq_memory)) {
511 		ret = -EINVAL;
512 		goto error;
513 	}
514 
515 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
516 				     bc_tbls_size);
517 	if (ret) {
518 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
519 		goto error;
520 	}
521 
522 	/* Alloc keep-warm buffer */
523 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
524 	if (ret) {
525 		IWL_ERR(trans, "Keep Warm allocation failed\n");
526 		goto error;
527 	}
528 
529 	trans_pcie->txq_memory =
530 		kcalloc(trans->trans_cfg->base_params->num_of_queues,
531 			sizeof(struct iwl_txq), GFP_KERNEL);
532 	if (!trans_pcie->txq_memory) {
533 		IWL_ERR(trans, "Not enough memory for txq\n");
534 		ret = -ENOMEM;
535 		goto error;
536 	}
537 
538 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
539 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
540 	     txq_id++) {
541 		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
542 
543 		if (cmd_queue)
544 			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
545 					  trans->cfg->min_txq_size);
546 		else
547 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
548 					  trans->cfg->min_ba_txq_size);
549 		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
550 		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
551 				    cmd_queue);
552 		if (ret) {
553 			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
554 			goto error;
555 		}
556 		trans->txqs.txq[txq_id]->id = txq_id;
557 	}
558 
559 	return 0;
560 
561 error:
562 	iwl_pcie_tx_free(trans);
563 
564 	return ret;
565 }
566 
567 int iwl_pcie_tx_init(struct iwl_trans *trans)
568 {
569 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
570 	int ret;
571 	int txq_id, slots_num;
572 	bool alloc = false;
573 
574 	if (!trans_pcie->txq_memory) {
575 		ret = iwl_pcie_tx_alloc(trans);
576 		if (ret)
577 			goto error;
578 		alloc = true;
579 	}
580 
581 	spin_lock_bh(&trans_pcie->irq_lock);
582 
583 	/* Turn off all Tx DMA fifos */
584 	iwl_scd_deactivate_fifos(trans);
585 
586 	/* Tell NIC where to find the "keep warm" buffer */
587 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
588 			   trans_pcie->kw.dma >> 4);
589 
590 	spin_unlock_bh(&trans_pcie->irq_lock);
591 
592 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
593 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
594 	     txq_id++) {
595 		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
596 
597 		if (cmd_queue)
598 			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
599 					  trans->cfg->min_txq_size);
600 		else
601 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
602 					  trans->cfg->min_ba_txq_size);
603 		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
604 				   cmd_queue);
605 		if (ret) {
606 			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
607 			goto error;
608 		}
609 
610 		/*
611 		 * Tell nic where to find circular buffer of TFDs for a
612 		 * given Tx queue, and enable the DMA channel used for that
613 		 * queue.
614 		 * Circular buffer (TFD queue in DRAM) physical base address
615 		 */
616 		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
617 				   trans->txqs.txq[txq_id]->dma_addr >> 8);
618 	}
619 
620 	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
621 	if (trans->trans_cfg->base_params->num_of_queues > 20)
622 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
623 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
624 
625 	return 0;
626 error:
627 	/* Upon error, free only if we allocated something */
628 	if (alloc)
629 		iwl_pcie_tx_free(trans);
630 	return ret;
631 }
632 
633 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
634 				      const struct iwl_host_cmd *cmd)
635 {
636 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
637 
638 	/* Make sure the NIC is still alive in the bus */
639 	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
640 		return -ENODEV;
641 
642 	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
643 		return 0;
644 
645 	/*
646 	 * wake up the NIC to make sure that the firmware will see the host
647 	 * command - we will let the NIC sleep once all the host commands
648 	 * returned. This needs to be done only on NICs that have
649 	 * apmg_wake_up_wa set (see above.)
650 	 */
651 	if (!_iwl_trans_pcie_grab_nic_access(trans))
652 		return -EIO;
653 
654 	/*
655 	 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
656 	 * There, we also returned immediately if cmd_hold_nic_awake is
657 	 * already true, so it's OK to unconditionally set it to true.
658 	 */
659 	trans_pcie->cmd_hold_nic_awake = true;
660 	spin_unlock(&trans_pcie->reg_lock);
661 
662 	return 0;
663 }
664 
665 /*
666  * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
667  *
668  * When FW advances 'R' index, all entries between old and new 'R' index
669  * need to be reclaimed.  As a result, some free space forms.  If there is
670  * enough free space (> low mark), wake the stack that feeds us.
671  */
672 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
673 {
674 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
675 	int nfreed = 0;
676 	u16 r;
677 
678 	lockdep_assert_held(&txq->lock);
679 
680 	idx = iwl_txq_get_cmd_index(txq, idx);
681 	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
682 
683 	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
684 	    (!iwl_txq_used(txq, idx))) {
685 		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
686 			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
687 			  __func__, txq_id, idx,
688 			  trans->trans_cfg->base_params->max_tfd_queue_size,
689 			  txq->write_ptr, txq->read_ptr);
690 		return;
691 	}
692 
693 	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
694 	     r = iwl_txq_inc_wrap(trans, r)) {
695 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
696 
697 		if (nfreed++ > 0) {
698 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
699 				idx, txq->write_ptr, r);
700 			iwl_force_nmi(trans);
701 		}
702 	}
703 
704 	if (txq->read_ptr == txq->write_ptr)
705 		iwl_pcie_clear_cmd_in_flight(trans);
706 
707 	iwl_txq_progress(txq);
708 }
709 
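/*
 * Write the RA/TID mapping for this queue into the scheduler's translation
 * table in SRAM.  Two queues share each 32-bit entry: odd-numbered queues
 * use the upper half-word, even-numbered queues the lower one.
 */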
710 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
711 				 u16 txq_id)
712 {
713 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
714 	u32 tbl_dw_addr;
715 	u32 tbl_dw;
716 	u16 scd_q2ratid;
717 
718 	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
719 
720 	tbl_dw_addr = trans_pcie->scd_base_addr +
721 			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
722 
723 	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
724 
725 	if (txq_id & 0x1)
726 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
727 	else
728 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
729 
730 	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
731 
732 	return 0;
733 }
734 
735 /* Receiver address (actually, Rx station's index into station table),
736  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
737 #define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
738 
739 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
740 			       const struct iwl_trans_txq_scd_cfg *cfg,
741 			       unsigned int wdg_timeout)
742 {
743 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
744 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
745 	int fifo = -1;
746 	bool scd_bug = false;
747 
748 	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
749 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
750 
751 	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
752 
753 	if (cfg) {
754 		fifo = cfg->fifo;
755 
756 		/* Disable the scheduler prior to configuring the cmd queue */
757 		if (txq_id == trans->txqs.cmd.q_id &&
758 		    trans_pcie->scd_set_active)
759 			iwl_scd_enable_set_active(trans, 0);
760 
761 		/* Stop this Tx queue before configuring it */
762 		iwl_scd_txq_set_inactive(trans, txq_id);
763 
764 		/* Set this queue as a chain-building queue unless it is CMD */
765 		if (txq_id != trans->txqs.cmd.q_id)
766 			iwl_scd_txq_set_chain(trans, txq_id);
767 
768 		if (cfg->aggregate) {
769 			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
770 
771 			/* Map receiver-address / traffic-ID to this queue */
772 			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
773 
774 			/* enable aggregations for the queue */
775 			iwl_scd_txq_enable_agg(trans, txq_id);
776 			txq->ampdu = true;
777 		} else {
778 			/*
779 			 * disable aggregations for the queue, this will also
780 			 * make the ra_tid mapping configuration irrelevant
781 			 * since it is now a non-AGG queue.
782 			 */
783 			iwl_scd_txq_disable_agg(trans, txq_id);
784 
785 			ssn = txq->read_ptr;
786 		}
787 	} else {
788 		/*
789 		 * If we need to move the SCD write pointer by steps of
790 		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
791 		 * the op_mode know by returning true later.
792 		 * Do this only in case cfg is NULL since this trick can
793 		 * be done only if we have DQA enabled which is true for mvm
794 		 * only. And mvm never sets a cfg pointer.
795 		 * This is really ugly, but this is the easiest way out for
796 		 * this sad hardware issue.
797 		 * This bug has been fixed on devices 9000 and up.
798 		 */
799 		scd_bug = !trans->trans_cfg->mq_rx_supported &&
800 			!((ssn - txq->write_ptr) & 0x3f) &&
801 			(ssn != txq->write_ptr);
802 		if (scd_bug)
803 			ssn++;
804 	}
805 
806 	/* Place first TFD at index corresponding to start sequence number.
807 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
808 	txq->read_ptr = (ssn & 0xff);
809 	txq->write_ptr = (ssn & 0xff);
810 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
811 			   (ssn & 0xff) | (txq_id << 8));
812 
813 	if (cfg) {
814 		u8 frame_limit = cfg->frame_limit;
815 
816 		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
817 
818 		/* Set up Tx window size and frame limit for this queue */
819 		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
820 				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
821 		iwl_trans_write_mem32(trans,
822 			trans_pcie->scd_base_addr +
823 			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
824 			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
825 			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
826 
827 		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
828 		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
829 			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
830 			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
831 			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
832 			       SCD_QUEUE_STTS_REG_MSK);
833 
834 		/* enable the scheduler for this queue (only) */
835 		if (txq_id == trans->txqs.cmd.q_id &&
836 		    trans_pcie->scd_set_active)
837 			iwl_scd_enable_set_active(trans, BIT(txq_id));
838 
839 		IWL_DEBUG_TX_QUEUES(trans,
840 				    "Activate queue %d on FIFO %d WrPtr: %d\n",
841 				    txq_id, fifo, ssn & 0xff);
842 	} else {
843 		IWL_DEBUG_TX_QUEUES(trans,
844 				    "Activate queue %d WrPtr: %d\n",
845 				    txq_id, ssn & 0xff);
846 	}
847 
848 	return scd_bug;
849 }
850 
851 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
852 					bool shared_mode)
853 {
854 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
855 
856 	txq->ampdu = !shared_mode;
857 }
858 
859 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
860 				bool configure_scd)
861 {
862 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
863 	u32 stts_addr = trans_pcie->scd_base_addr +
864 			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
865 	static const u32 zero_val[4] = {};
866 
867 	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
868 	trans->txqs.txq[txq_id]->frozen = false;
869 
870 	/*
871 	 * Upon HW Rfkill - we stop the device, and then stop the queues
872 	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
873 	 * allow the op_mode to call txq_disable after it already called
874 	 * stop_device.
875 	 */
876 	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
877 		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
878 			  "queue %d not used", txq_id);
879 		return;
880 	}
881 
882 	if (configure_scd) {
883 		iwl_scd_txq_set_inactive(trans, txq_id);
884 
885 		iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
886 				    ARRAY_SIZE(zero_val));
887 	}
888 
889 	iwl_pcie_txq_unmap(trans, txq_id);
890 	trans->txqs.txq[txq_id]->ampdu = false;
891 
892 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
893 }
894 
895 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
896 
897 /*
898  * iwl_pcie_enqueue_hcmd - enqueue a uCode command
899  * @trans: the transport layer (device private data)
900  * @cmd: a pointer to the ucode command structure
901  *
902  * The function returns < 0 values to indicate the operation
903  * failed. On success, it returns the index (>= 0) of the command in the
904  * command queue.
905  */
906 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
907 			  struct iwl_host_cmd *cmd)
908 {
909 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
910 	struct iwl_device_cmd *out_cmd;
911 	struct iwl_cmd_meta *out_meta;
912 	void *dup_buf = NULL;
913 	dma_addr_t phys_addr;
914 	int idx;
915 	u16 copy_size, cmd_size, tb0_size;
916 	bool had_nocopy = false;
917 	u8 group_id = iwl_cmd_groupid(cmd->id);
918 	int i, ret;
919 	u32 cmd_pos;
920 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
921 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
922 	unsigned long flags;
923 
924 	if (WARN(!trans->wide_cmd_header &&
925 		 group_id > IWL_ALWAYS_LONG_GROUP,
926 		 "unsupported wide command %#x\n", cmd->id))
927 		return -EINVAL;
928 
929 	if (group_id != 0) {
930 		copy_size = sizeof(struct iwl_cmd_header_wide);
931 		cmd_size = sizeof(struct iwl_cmd_header_wide);
932 	} else {
933 		copy_size = sizeof(struct iwl_cmd_header);
934 		cmd_size = sizeof(struct iwl_cmd_header);
935 	}
936 
937 	/* need one for the header if the first is NOCOPY */
938 	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
939 
940 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
941 		cmddata[i] = cmd->data[i];
942 		cmdlen[i] = cmd->len[i];
943 
944 		if (!cmd->len[i])
945 			continue;
946 
947 		/* need at least IWL_FIRST_TB_SIZE copied */
948 		if (copy_size < IWL_FIRST_TB_SIZE) {
949 			int copy = IWL_FIRST_TB_SIZE - copy_size;
950 
951 			if (copy > cmdlen[i])
952 				copy = cmdlen[i];
953 			cmdlen[i] -= copy;
954 			cmddata[i] += copy;
955 			copy_size += copy;
956 		}
957 
958 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
959 			had_nocopy = true;
960 			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
961 				idx = -EINVAL;
962 				goto free_dup_buf;
963 			}
964 		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
965 			/*
966 			 * This is also a chunk that isn't copied
967 			 * to the static buffer so set had_nocopy.
968 			 */
969 			had_nocopy = true;
970 
971 			/* only allowed once */
972 			if (WARN_ON(dup_buf)) {
973 				idx = -EINVAL;
974 				goto free_dup_buf;
975 			}
976 
977 			dup_buf = kmemdup(cmddata[i], cmdlen[i],
978 					  GFP_ATOMIC);
979 			if (!dup_buf)
980 				return -ENOMEM;
981 		} else {
982 			/* NOCOPY must not be followed by normal! */
983 			if (WARN_ON(had_nocopy)) {
984 				idx = -EINVAL;
985 				goto free_dup_buf;
986 			}
987 			copy_size += cmdlen[i];
988 		}
989 		cmd_size += cmd->len[i];
990 	}
991 
992 	/*
993 	 * If any of the command structures end up being larger than
994 	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
995 	 * allocated into separate TFDs, then we will need to
996 	 * increase the size of the buffers.
997 	 */
998 	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
999 		 "Command %s (%#x) is too large (%d bytes)\n",
1000 		 iwl_get_cmd_string(trans, cmd->id),
1001 		 cmd->id, copy_size)) {
1002 		idx = -EINVAL;
1003 		goto free_dup_buf;
1004 	}
1005 
1006 	spin_lock_irqsave(&txq->lock, flags);
1007 
1008 	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1009 		spin_unlock_irqrestore(&txq->lock, flags);
1010 
1011 		IWL_ERR(trans, "No space in command queue\n");
1012 		iwl_op_mode_cmd_queue_full(trans->op_mode);
1013 		idx = -ENOSPC;
1014 		goto free_dup_buf;
1015 	}
1016 
1017 	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
1018 	out_cmd = txq->entries[idx].cmd;
1019 	out_meta = &txq->entries[idx].meta;
1020 
1021 	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
1022 	if (cmd->flags & CMD_WANT_SKB)
1023 		out_meta->source = cmd;
1024 
1025 	/* set up the header */
1026 	if (group_id != 0) {
1027 		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1028 		out_cmd->hdr_wide.group_id = group_id;
1029 		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1030 		out_cmd->hdr_wide.length =
1031 			cpu_to_le16(cmd_size -
1032 				    sizeof(struct iwl_cmd_header_wide));
1033 		out_cmd->hdr_wide.reserved = 0;
1034 		out_cmd->hdr_wide.sequence =
1035 			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1036 						 INDEX_TO_SEQ(txq->write_ptr));
1037 
1038 		cmd_pos = sizeof(struct iwl_cmd_header_wide);
1039 		copy_size = sizeof(struct iwl_cmd_header_wide);
1040 	} else {
1041 		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1042 		out_cmd->hdr.sequence =
1043 			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1044 						 INDEX_TO_SEQ(txq->write_ptr));
1045 		out_cmd->hdr.group_id = 0;
1046 
1047 		cmd_pos = sizeof(struct iwl_cmd_header);
1048 		copy_size = sizeof(struct iwl_cmd_header);
1049 	}
1050 
1051 	/* and copy the data that needs to be copied */
1052 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1053 		int copy;
1054 
1055 		if (!cmd->len[i])
1056 			continue;
1057 
1058 		/* copy everything if not nocopy/dup */
1059 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1060 					   IWL_HCMD_DFL_DUP))) {
1061 			copy = cmd->len[i];
1062 
1063 			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1064 			cmd_pos += copy;
1065 			copy_size += copy;
1066 			continue;
1067 		}
1068 
1069 		/*
1070 		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
1071 		 * in total (for bi-directional DMA), but copy up to what
1072 		 * we can fit into the payload for debug dump purposes.
1073 		 */
1074 		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1075 
1076 		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1077 		cmd_pos += copy;
1078 
1079 		/* However, treat copy_size the proper way, we need it below */
1080 		if (copy_size < IWL_FIRST_TB_SIZE) {
1081 			copy = IWL_FIRST_TB_SIZE - copy_size;
1082 
1083 			if (copy > cmd->len[i])
1084 				copy = cmd->len[i];
1085 			copy_size += copy;
1086 		}
1087 	}
1088 
1089 	IWL_DEBUG_HC(trans,
1090 		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1091 		     iwl_get_cmd_string(trans, cmd->id),
1092 		     group_id, out_cmd->hdr.cmd,
1093 		     le16_to_cpu(out_cmd->hdr.sequence),
1094 		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
1095 
1096 	/* start the TFD with the minimum copy bytes */
1097 	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
1098 	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1099 	iwl_pcie_txq_build_tfd(trans, txq,
1100 			       iwl_txq_get_first_tb_dma(txq, idx),
1101 			       tb0_size, true);
1102 
1103 	/* map first command fragment, if any remains */
1104 	if (copy_size > tb0_size) {
1105 		phys_addr = dma_map_single(trans->dev,
1106 					   ((u8 *)&out_cmd->hdr) + tb0_size,
1107 					   copy_size - tb0_size,
1108 					   DMA_TO_DEVICE);
1109 		if (dma_mapping_error(trans->dev, phys_addr)) {
1110 			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1111 					       txq->write_ptr);
1112 			idx = -ENOMEM;
1113 			goto out;
1114 		}
1115 
1116 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1117 				       copy_size - tb0_size, false);
1118 	}
1119 
1120 	/* map the remaining (adjusted) nocopy/dup fragments */
1121 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1122 		void *data = (void *)(uintptr_t)cmddata[i];
1123 
1124 		if (!cmdlen[i])
1125 			continue;
1126 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1127 					   IWL_HCMD_DFL_DUP)))
1128 			continue;
1129 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1130 			data = dup_buf;
1131 		phys_addr = dma_map_single(trans->dev, data,
1132 					   cmdlen[i], DMA_TO_DEVICE);
1133 		if (dma_mapping_error(trans->dev, phys_addr)) {
1134 			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1135 					       txq->write_ptr);
1136 			idx = -ENOMEM;
1137 			goto out;
1138 		}
1139 
1140 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1141 	}
1142 
1143 	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1144 	out_meta->flags = cmd->flags;
1145 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1146 		kfree_sensitive(txq->entries[idx].free_buf);
1147 	txq->entries[idx].free_buf = dup_buf;
1148 
1149 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1150 
1151 	/* start timer if queue currently empty */
1152 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1153 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1154 
1155 	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1156 	if (ret < 0) {
1157 		idx = ret;
1158 		goto out;
1159 	}
1160 
1161 	/* Increment and update queue's write index */
1162 	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1163 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
1164 
1165  out:
1166 	spin_unlock_irqrestore(&txq->lock, flags);
1167  free_dup_buf:
1168 	if (idx < 0)
1169 		kfree(dup_buf);
1170 	return idx;
1171 }
1172 
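/*
 * Illustrative sketch (not part of the driver): roughly how a caller above
 * the transport layer describes a host command so that the enqueue path
 * above can split it into copied and NOCOPY chunks.  Field and flag names
 * (struct iwl_host_cmd, CMD_WANT_SKB, IWL_HCMD_DFL_NOCOPY) follow their use
 * in this file; the command id, payload layout and iwl_trans_send_cmd()
 * entry point are assumptions of the example.
 */
#if 0	/* example only, never compiled */
static int example_send_cmd(struct iwl_trans *trans,
			    const void *hdr_data, u16 hdr_len,
			    const void *big_buf, u16 big_len)
{
	struct iwl_host_cmd cmd = {
		.id = EXAMPLE_CMD_ID,		/* hypothetical command id */
		.flags = CMD_WANT_SKB,		/* caller wants the response */
		.data = { hdr_data, big_buf },
		.len = { hdr_len, big_len },
		/* first chunk is copied into the TFD, second is DMA mapped */
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
	};

	return iwl_trans_send_cmd(trans, &cmd);
}
#endif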
1173 /*
1174  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1175  * @rxb: Rx buffer to reclaim
1176  */
1177 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1178 			    struct iwl_rx_cmd_buffer *rxb)
1179 {
1180 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1181 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1182 	u8 group_id;
1183 	u32 cmd_id;
1184 	int txq_id = SEQ_TO_QUEUE(sequence);
1185 	int index = SEQ_TO_INDEX(sequence);
1186 	int cmd_index;
1187 	struct iwl_device_cmd *cmd;
1188 	struct iwl_cmd_meta *meta;
1189 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1190 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1191 
1192 	/* If a Tx command is being handled and it isn't in the actual
1193 	 * command queue, then a command routing bug has been introduced
1194 	 * in the queue management code. */
1195 	if (WARN(txq_id != trans->txqs.cmd.q_id,
1196 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1197 		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
1198 		 txq->write_ptr)) {
1199 		iwl_print_hex_error(trans, pkt, 32);
1200 		return;
1201 	}
1202 
1203 	spin_lock_bh(&txq->lock);
1204 
1205 	cmd_index = iwl_txq_get_cmd_index(txq, index);
1206 	cmd = txq->entries[cmd_index].cmd;
1207 	meta = &txq->entries[cmd_index].meta;
1208 	group_id = cmd->hdr.group_id;
1209 	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
1210 
1211 	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
1212 
1213 	/* Input error checking is done when commands are added to queue. */
1214 	if (meta->flags & CMD_WANT_SKB) {
1215 		struct page *p = rxb_steal_page(rxb);
1216 
1217 		meta->source->resp_pkt = pkt;
1218 #if defined(__linux__)
1219 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
1220 #elif defined(__FreeBSD__)
1221 		meta->source->_page = p;
1222 #endif
1223 		meta->source->_rx_page_order = trans_pcie->rx_page_order;
1224 	}
1225 
1226 	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
1227 		iwl_op_mode_async_cb(trans->op_mode, cmd);
1228 
1229 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1230 
1231 	if (!(meta->flags & CMD_ASYNC)) {
1232 		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1233 			IWL_WARN(trans,
1234 				 "HCMD_ACTIVE already clear for command %s\n",
1235 				 iwl_get_cmd_string(trans, cmd_id));
1236 		}
1237 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1238 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1239 			       iwl_get_cmd_string(trans, cmd_id));
1240 		wake_up(&trans->wait_command_queue);
1241 	}
1242 
1243 	meta->flags = 0;
1244 
1245 	spin_unlock_bh(&txq->lock);
1246 }
1247 
1248 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1249 			     struct iwl_txq *txq, u8 hdr_len,
1250 			     struct iwl_cmd_meta *out_meta)
1251 {
1252 	u16 head_tb_len;
1253 	int i;
1254 
1255 	/*
1256 	 * Set up TFD's third entry to point directly to remainder
1257 	 * of skb's head, if any
1258 	 */
1259 	head_tb_len = skb_headlen(skb) - hdr_len;
1260 
1261 	if (head_tb_len > 0) {
1262 		dma_addr_t tb_phys = dma_map_single(trans->dev,
1263 						    skb->data + hdr_len,
1264 						    head_tb_len, DMA_TO_DEVICE);
1265 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1266 			return -EINVAL;
1267 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
1268 					tb_phys, head_tb_len);
1269 		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
1270 	}
1271 
1272 	/* set up the remaining entries to point to the data */
1273 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1274 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1275 		dma_addr_t tb_phys;
1276 		int tb_idx;
1277 
1278 		if (!skb_frag_size(frag))
1279 			continue;
1280 
1281 		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1282 					   skb_frag_size(frag), DMA_TO_DEVICE);
1283 
1284 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1285 			return -EINVAL;
1286 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
1287 					tb_phys, skb_frag_size(frag));
1288 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1289 						skb_frag_size(frag), false);
1290 		if (tb_idx < 0)
1291 			return tb_idx;
1292 
1293 		out_meta->tbs |= BIT(tb_idx);
1294 	}
1295 
1296 	return 0;
1297 }
1298 
1299 #ifdef CONFIG_INET
1300 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
1301 				   struct iwl_txq *txq, u8 hdr_len,
1302 				   struct iwl_cmd_meta *out_meta,
1303 				   struct iwl_device_tx_cmd *dev_cmd,
1304 				   u16 tb1_len)
1305 {
1306 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1307 	struct ieee80211_hdr *hdr = (void *)skb->data;
1308 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
1309 	unsigned int mss = skb_shinfo(skb)->gso_size;
1310 	u16 length, iv_len, amsdu_pad;
1311 	u8 *start_hdr;
1312 	struct iwl_tso_hdr_page *hdr_page;
1313 	struct tso_t tso;
1314 
1315 	/* if the packet is protected, then it must be CCMP or GCMP */
1316 	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
1317 	iv_len = ieee80211_has_protected(hdr->frame_control) ?
1318 		IEEE80211_CCMP_HDR_LEN : 0;
1319 
1320 	trace_iwlwifi_dev_tx(trans->dev, skb,
1321 			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1322 			     trans->txqs.tfd.size,
1323 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
1324 
1325 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
1326 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
1327 	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
1328 	amsdu_pad = 0;
1329 
1330 	/* total amount of header we may need for this A-MSDU */
1331 	hdr_room = DIV_ROUND_UP(total_len, mss) *
1332 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
1333 
1334 	/* Our device supports 9 segments at most, so it will fit in 1 page */
1335 	hdr_page = get_page_hdr(trans, hdr_room, skb);
1336 	if (!hdr_page)
1337 		return -ENOMEM;
1338 
1339 	start_hdr = hdr_page->pos;
1340 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
1341 	hdr_page->pos += iv_len;
1342 
1343 	/*
1344 	 * Pull the ieee80211 header + IV to be able to use the TSO core;
1345 	 * we will restore it for the tx_status flow.
1346 	 */
1347 	skb_pull(skb, hdr_len + iv_len);
1348 
1349 	/*
1350 	 * Remove the length of all the headers that we don't actually
1351 	 * have in the MPDU by themselves, but that we duplicate into
1352 	 * all the different MSDUs inside the A-MSDU.
1353 	 */
1354 	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
1355 
1356 	tso_start(skb, &tso);
1357 
1358 	while (total_len) {
1359 		/* this is the data left for this subframe */
1360 		unsigned int data_left =
1361 			min_t(unsigned int, mss, total_len);
1362 		unsigned int hdr_tb_len;
1363 		dma_addr_t hdr_tb_phys;
1364 		u8 *subf_hdrs_start = hdr_page->pos;
1365 
1366 		total_len -= data_left;
1367 
1368 		memset(hdr_page->pos, 0, amsdu_pad);
1369 		hdr_page->pos += amsdu_pad;
1370 		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
1371 				  data_left)) & 0x3;
1372 		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
1373 		hdr_page->pos += ETH_ALEN;
1374 		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
1375 		hdr_page->pos += ETH_ALEN;
1376 
1377 		length = snap_ip_tcp_hdrlen + data_left;
1378 		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
1379 		hdr_page->pos += sizeof(length);
1380 
1381 		/*
1382 		 * This will copy the SNAP as well which will be considered
1383 		 * as MAC header.
1384 		 */
1385 		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
1386 
1387 		hdr_page->pos += snap_ip_tcp_hdrlen;
1388 
1389 		hdr_tb_len = hdr_page->pos - start_hdr;
1390 		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
1391 					     hdr_tb_len, DMA_TO_DEVICE);
1392 		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
1393 			return -EINVAL;
1394 		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
1395 				       hdr_tb_len, false);
1396 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
1397 					hdr_tb_phys, hdr_tb_len);
1398 		/* add this subframe's headers' length to the tx_cmd */
1399 		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
1400 
1401 		/* prepare the start_hdr for the next subframe */
1402 		start_hdr = hdr_page->pos;
1403 
1404 		/* put the payload */
1405 		while (data_left) {
1406 			unsigned int size = min_t(unsigned int, tso.size,
1407 						  data_left);
1408 			dma_addr_t tb_phys;
1409 
1410 			tb_phys = dma_map_single(trans->dev, tso.data,
1411 						 size, DMA_TO_DEVICE);
1412 			if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1413 				return -EINVAL;
1414 
1415 			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1416 					       size, false);
1417 			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
1418 						tb_phys, size);
1419 
1420 			data_left -= size;
1421 			tso_build_data(skb, &tso, size);
1422 		}
1423 	}
1424 
1425 	/* re-add the WiFi header and IV */
1426 	skb_push(skb, hdr_len + iv_len);
1427 
1428 	return 0;
1429 }
1430 #else /* CONFIG_INET */
1431 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
1432 				   struct iwl_txq *txq, u8 hdr_len,
1433 				   struct iwl_cmd_meta *out_meta,
1434 				   struct iwl_device_tx_cmd *dev_cmd,
1435 				   u16 tb1_len)
1436 {
1437 	/* No A-MSDU without CONFIG_INET */
1438 	WARN_ON(1);
1439 
1440 	return -1;
1441 }
1442 #endif /* CONFIG_INET */
1443 
1444 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1445 		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
1446 {
1447 	struct ieee80211_hdr *hdr;
1448 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
1449 	struct iwl_cmd_meta *out_meta;
1450 	struct iwl_txq *txq;
1451 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1452 	void *tb1_addr;
1453 	void *tfd;
1454 	u16 len, tb1_len;
1455 	bool wait_write_ptr;
1456 	__le16 fc;
1457 	u8 hdr_len;
1458 	u16 wifi_seq;
1459 	bool amsdu;
1460 
1461 	txq = trans->txqs.txq[txq_id];
1462 
1463 	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
1464 		      "TX on unused queue %d\n", txq_id))
1465 		return -EINVAL;
1466 
1467 	if (skb_is_nonlinear(skb) &&
1468 	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
1469 	    __skb_linearize(skb))
1470 		return -ENOMEM;
1471 
1472 	/* mac80211 always puts the full header into the SKB's head,
1473 	 * so there's no need to check if it's readable there
1474 	 */
1475 	hdr = (struct ieee80211_hdr *)skb->data;
1476 	fc = hdr->frame_control;
1477 	hdr_len = ieee80211_hdrlen(fc);
1478 
1479 	spin_lock(&txq->lock);
1480 
1481 	if (iwl_txq_space(trans, txq) < txq->high_mark) {
1482 		iwl_txq_stop(trans, txq);
1483 
1484 		/* don't put the packet on the ring, if there is no room */
1485 		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
1486 			struct iwl_device_tx_cmd **dev_cmd_ptr;
1487 
1488 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
1489 					       trans->txqs.dev_cmd_offs);
1490 
1491 			*dev_cmd_ptr = dev_cmd;
1492 			__skb_queue_tail(&txq->overflow_q, skb);
1493 
1494 			spin_unlock(&txq->lock);
1495 			return 0;
1496 		}
1497 	}
1498 
1499 	/* In AGG mode, the index in the ring must correspond to the WiFi
1500 	 * sequence number. This is a HW requirement to help the SCD parse
1501 	 * the BA.
1502 	 * Check here that the packets are in the right place on the ring.
1503 	 */
1504 	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1505 	WARN_ONCE(txq->ampdu &&
1506 		  (wifi_seq & 0xff) != txq->write_ptr,
1507 		  "Q: %d WiFi Seq %d tfdNum %d",
1508 		  txq_id, wifi_seq, txq->write_ptr);
1509 
1510 	/* Set up driver data for this TFD */
1511 	txq->entries[txq->write_ptr].skb = skb;
1512 	txq->entries[txq->write_ptr].cmd = dev_cmd;
1513 
1514 	dev_cmd->hdr.sequence =
1515 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1516 			    INDEX_TO_SEQ(txq->write_ptr)));
1517 
1518 	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
1519 	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
1520 		       offsetof(struct iwl_tx_cmd, scratch);
1521 
1522 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1523 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1524 
1525 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
1526 	out_meta = &txq->entries[txq->write_ptr].meta;
1527 	out_meta->flags = 0;
1528 
1529 	/*
1530 	 * The second TB (tb1) points to the remainder of the TX command
1531 	 * and the 802.11 header - dword aligned size
1532 	 * (This calculation modifies the TX command, so do it before the
1533 	 * setup of the first TB)
1534 	 */
1535 	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
1536 	      hdr_len - IWL_FIRST_TB_SIZE;
1537 	/* do not align A-MSDU to dword as the subframe header aligns it */
1538 	amsdu = ieee80211_is_data_qos(fc) &&
1539 		(*ieee80211_get_qos_ctl(hdr) &
1540 		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
1541 	if (!amsdu) {
1542 		tb1_len = ALIGN(len, 4);
1543 		/* Tell NIC about any 2-byte padding after MAC header */
1544 		if (tb1_len != len)
1545 			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
1546 	} else {
1547 		tb1_len = len;
1548 	}
1549 
1550 	/*
1551 	 * The first TB points to bi-directional DMA data, we'll
1552 	 * memcpy the data into it later.
1553 	 */
1554 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
1555 			       IWL_FIRST_TB_SIZE, true);
1556 
1557 	/* there must be data left over for TB1 or this code must be changed */
1558 	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
1559 	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
1560 		     offsetofend(struct iwl_tx_cmd, scratch) >
1561 		     IWL_FIRST_TB_SIZE);
1562 
1563 	/* map the data for TB1 */
1564 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
1565 	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
1566 	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
1567 		goto out_err;
1568 	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
1569 
1570 	trace_iwlwifi_dev_tx(trans->dev, skb,
1571 			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1572 			     trans->txqs.tfd.size,
1573 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
1574 			     hdr_len);
1575 
1576 	/*
1577 	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
1578 	 * (adding subframes, etc.).
1579 	 * This can happen in some testing flows when the amsdu was already
1580 	 * pre-built, and we just need to send the resulting skb.
1581 	 */
1582 	if (amsdu && skb_shinfo(skb)->gso_size) {
1583 		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
1584 						     out_meta, dev_cmd,
1585 						     tb1_len)))
1586 			goto out_err;
1587 	} else {
1588 		struct sk_buff *frag;
1589 
1590 		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
1591 					       out_meta)))
1592 			goto out_err;
1593 
1594 		skb_walk_frags(skb, frag) {
1595 			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
1596 						       out_meta)))
1597 				goto out_err;
1598 		}
1599 	}
1600 
1601 	/* building the A-MSDU might have changed this data, so memcpy it now */
1602 	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
1603 
1604 	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
1605 	/* Set up entry for this TFD in Tx byte-count array */
1606 	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
1607 					 iwl_txq_gen1_tfd_get_num_tbs(trans,
1608 								      tfd));
1609 
1610 	wait_write_ptr = ieee80211_has_morefrags(fc);
1611 
1612 	/* start timer if queue currently empty */
1613 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
1614 		/*
1615 		 * If the TXQ is active, then set the timer, if not,
1616 		 * set the timer in remainder so that the timer will
1617 		 * be armed with the right value when the station will
1618 		 * wake up.
1619 		 */
1620 		if (!txq->frozen)
1621 			mod_timer(&txq->stuck_timer,
1622 				  jiffies + txq->wd_timeout);
1623 		else
1624 			txq->frozen_expiry_remainder = txq->wd_timeout;
1625 	}
1626 
1627 	/* Tell device the write index *just past* this latest filled TFD */
1628 	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1629 	if (!wait_write_ptr)
1630 		iwl_pcie_txq_inc_wr_ptr(trans, txq);
1631 
1632 	/*
1633 	 * At this point the frame is "transmitted" successfully
1634 	 * and we will get a TX status notification eventually.
1635 	 */
1636 	spin_unlock(&txq->lock);
1637 	return 0;
1638 out_err:
1639 	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
1640 	spin_unlock(&txq->lock);
1641 	return -1;
1642 }
1643