1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2020-2021 Intel Corporation
4 */
5 #include <net/tso.h>
6 #include <linux/tcp.h>
7
8 #include "iwl-debug.h"
9 #include "iwl-io.h"
10 #include "fw/api/tx.h"
11 #include "queue/tx.h"
12 #include "iwl-fh.h"
13 #include "iwl-scd.h"
14 #include <linux/dmapool.h>
15
16 /*
17 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
18 */
19 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
20 struct iwl_txq *txq, u16 byte_cnt,
21 int num_tbs)
22 {
23 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
24 u8 filled_tfd_size, num_fetch_chunks;
25 u16 len = byte_cnt;
26 __le16 bc_ent;
27
28 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
29 return;
30
31 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
32 num_tbs * sizeof(struct iwl_tfh_tb);
33 /*
34 * filled_tfd_size contains the number of filled bytes in the TFD.
35 * Dividing it by 64 will give the number of chunks to fetch
36 * to SRAM - 0 for one chunk, 1 for 2, and so on.
37 * If, for example, TFD contains only 3 TBs then 32 bytes
38 * of the TFD are used, and only one chunk of 64 bytes should
39 * be fetched
40 */
41 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
42
43 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
44 struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
45
46 /* Starting from AX210, the HW expects bytes */
47 WARN_ON(trans->txqs.bc_table_dword);
48 WARN_ON(len > 0x3FFF);
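/*
 * Layout implied by the checks above: the byte count occupies bits 0-13
 * of the 16-bit entry and the fetch-chunk count occupies bits 14-15.
 */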
49 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
50 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
51 } else {
52 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
53
54 /* Before AX210, the HW expects DW */
55 WARN_ON(!trans->txqs.bc_table_dword);
56 len = DIV_ROUND_UP(len, 4);
57 WARN_ON(len > 0xFFF);
58 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
59 scd_bc_tbl->tfd_offset[idx] = bc_ent;
60 }
61 }
62
63 /*
64 * iwl_txq_inc_wr_ptr - Send new write index to hardware
65 */
66 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
67 {
68 lockdep_assert_held(&txq->lock);
69
70 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
71
72 /*
73 * if not in power-save mode, uCode will never sleep when we're
74 * trying to tx (during RFKILL, we're not trying to tx).
75 */
76 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
77 }
78
79 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
80 struct iwl_tfh_tfd *tfd)
81 {
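/* only the low 5 bits of the TFD's num_tbs field carry the TB count */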
82 return le16_to_cpu(tfd->num_tbs) & 0x1f;
83 }
84
85 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
86 struct iwl_tfh_tfd *tfd)
87 {
88 int i, num_tbs;
89
90 /* Sanity check on number of chunks */
91 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
92
93 if (num_tbs > trans->txqs.tfd.max_tbs) {
94 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
95 return;
96 }
97
98 /* first TB is never freed - it's the bidirectional DMA data */
99 for (i = 1; i < num_tbs; i++) {
100 if (meta->tbs & BIT(i))
101 dma_unmap_page(trans->dev,
102 le64_to_cpu(tfd->tbs[i].addr),
103 le16_to_cpu(tfd->tbs[i].tb_len),
104 DMA_TO_DEVICE);
105 else
106 dma_unmap_single(trans->dev,
107 le64_to_cpu(tfd->tbs[i].addr),
108 le16_to_cpu(tfd->tbs[i].tb_len),
109 DMA_TO_DEVICE);
110 }
111
112 tfd->num_tbs = 0;
113 }
114
115 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
116 {
117 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
118 * idx is bounded by n_window
119 */
120 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
121 struct sk_buff *skb;
122
123 lockdep_assert_held(&txq->lock);
124
125 if (!txq->entries)
126 return;
127
128 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
129 iwl_txq_get_tfd(trans, txq, idx));
130
131 skb = txq->entries[idx].skb;
132
133 /* Can be called from irqs-disabled context
134 * If skb is not NULL, it means that the whole queue is being
135 * freed and that the queue is not empty - free the skb
136 */
137 if (skb) {
138 iwl_op_mode_free_skb(trans->op_mode, skb);
139 txq->entries[idx].skb = NULL;
140 }
141 }
142
143 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
144 dma_addr_t addr, u16 len)
145 {
146 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
147 struct iwl_tfh_tb *tb;
148
149 /*
150 * Only WARN here so we know about the issue, but we mess up our
151 * unmap path because not every place currently checks for errors
152 * returned from this function - it can only return an error if
153 * there's no more space, and so when we know there is enough we
154 * don't always check ...
155 */
156 WARN(iwl_txq_crosses_4g_boundary(addr, len),
157 "possible DMA problem with iova:0x%llx, len:%d\n",
158 (unsigned long long)addr, len);
159
160 if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
161 return -EINVAL;
162 tb = &tfd->tbs[idx];
163
164 /* Each TFD can point to a maximum of max_tbs Tx buffers */
165 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
166 IWL_ERR(trans, "Error can not send more than %d chunks\n",
167 trans->txqs.tfd.max_tbs);
168 return -EINVAL;
169 }
170
171 put_unaligned_le64(addr, &tb->addr);
172 tb->tb_len = cpu_to_le16(len);
173
174 tfd->num_tbs = cpu_to_le16(idx + 1);
175
176 return idx;
177 }
178
179 static struct page *get_workaround_page(struct iwl_trans *trans,
180 struct sk_buff *skb)
181 {
182 struct page **page_ptr;
183 struct page *ret;
184
185 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
186
187 ret = alloc_page(GFP_ATOMIC);
188 if (!ret)
189 return NULL;
190
191 /* set the chaining pointer to the previous page if there */
192 *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
193 *page_ptr = ret;
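/*
 * The chain anchored in skb->cb is torn down later by
 * iwl_txq_free_tso_page(), which walks and frees every linked page.
 */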
194
195 return ret;
196 }
197
198 /*
199 * Add a TB and if needed apply the FH HW bug workaround;
200 * meta != NULL indicates that it's a page mapping and we
201 * need to dma_unmap_page() and set the meta->tbs bit in
202 * this case.
203 */
204 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
205 struct sk_buff *skb,
206 struct iwl_tfh_tfd *tfd,
207 dma_addr_t phys, void *virt,
208 u16 len, struct iwl_cmd_meta *meta)
209 {
210 dma_addr_t oldphys = phys;
211 struct page *page;
212 int ret;
213
214 if (unlikely(dma_mapping_error(trans->dev, phys)))
215 return -ENOMEM;
216
217 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
218 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
219
220 if (ret < 0)
221 goto unmap;
222
223 if (meta)
224 meta->tbs |= BIT(ret);
225
226 ret = 0;
227 goto trace;
228 }
229
230 /*
231 * Work around a hardware bug. If (as expressed in the
232 * condition above) the TB ends on a 32-bit boundary,
233 * then the next TB may be accessed with the wrong
234 * address.
235 * To work around it, copy the data elsewhere and make
236 * a new mapping for it so the device will not fail.
237 */
238
239 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
240 ret = -ENOBUFS;
241 goto unmap;
242 }
243
244 page = get_workaround_page(trans, skb);
245 if (!page) {
246 ret = -ENOMEM;
247 goto unmap;
248 }
249
250 memcpy(page_address(page), virt, len);
251
252 phys = dma_map_single(trans->dev, page_address(page), len,
253 DMA_TO_DEVICE);
254 if (unlikely(dma_mapping_error(trans->dev, phys)))
255 return -ENOMEM;
256 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
257 if (ret < 0) {
258 /* unmap the new allocation as single */
259 oldphys = phys;
260 meta = NULL;
261 goto unmap;
262 }
263 IWL_WARN(trans,
264 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
265 len, (unsigned long long)oldphys, (unsigned long long)phys);
266
267 ret = 0;
268 unmap:
269 if (meta)
270 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
271 else
272 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
273 trace:
274 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
275
276 return ret;
277 }
278
279 #ifdef CONFIG_INET
280 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
281 struct sk_buff *skb)
282 {
283 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
284 struct page **page_ptr;
285
286 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
287
288 if (WARN_ON(*page_ptr))
289 return NULL;
290
291 if (!p->page)
292 goto alloc;
293
294 /*
295 * Check if there's enough room on this page
296 *
297 * Note that we put a page chaining pointer *last* in the
298 * page - we need it somewhere, and if it's there then we
299 * avoid DMA mapping the last bits of the page which may
300 * trigger the 32-bit boundary hardware bug.
301 *
302 * (see also get_workaround_page() in tx-gen2.c)
303 */
304 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
305 sizeof(void *))
306 goto out;
307
308 /* We don't have enough room on this page, get a new one. */
309 __free_page(p->page);
310
311 alloc:
312 p->page = alloc_page(GFP_ATOMIC);
313 if (!p->page)
314 return NULL;
315 p->pos = page_address(p->page);
316 /* set the chaining pointer to NULL */
317 *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
318 out:
319 *page_ptr = p->page;
320 get_page(p->page);
321 return p;
322 }
323 #endif
324
325 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
326 struct sk_buff *skb,
327 struct iwl_tfh_tfd *tfd, int start_len,
328 u8 hdr_len,
329 struct iwl_device_tx_cmd *dev_cmd)
330 {
331 #ifdef CONFIG_INET
332 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
333 struct ieee80211_hdr *hdr = (void *)skb->data;
334 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
335 unsigned int mss = skb_shinfo(skb)->gso_size;
336 u16 length, amsdu_pad;
337 u8 *start_hdr;
338 struct iwl_tso_hdr_page *hdr_page;
339 struct tso_t tso;
340
341 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
342 &dev_cmd->hdr, start_len, 0);
343
344 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
345 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
346 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
347 amsdu_pad = 0;
348
349 /* total amount of header we may need for this A-MSDU */
350 hdr_room = DIV_ROUND_UP(total_len, mss) *
351 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
352
353 /* Our device supports 9 segments at most, so it will fit in 1 page */
354 hdr_page = get_page_hdr(trans, hdr_room, skb);
355 if (!hdr_page)
356 return -ENOMEM;
357
358 start_hdr = hdr_page->pos;
359
360 /*
361 * Pull the ieee80211 header to be able to use TSO core,
362 * we will restore it for the tx_status flow.
363 */
364 skb_pull(skb, hdr_len);
365
366 /*
367 * Remove the length of all the headers that we don't actually
368 * have in the MPDU by themselves, but that we duplicate into
369 * all the different MSDUs inside the A-MSDU.
370 */
371 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
372
373 tso_start(skb, &tso);
374
375 while (total_len) {
376 /* this is the data left for this subframe */
377 unsigned int data_left = min_t(unsigned int, mss, total_len);
378 unsigned int tb_len;
379 dma_addr_t tb_phys;
380 u8 *subf_hdrs_start = hdr_page->pos;
381
382 total_len -= data_left;
383
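/*
 * amsdu_pad holds the padding owed by the previous subframe; each
 * A-MSDU subframe header built below must start on a 4-byte boundary.
 */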
384 memset(hdr_page->pos, 0, amsdu_pad);
385 hdr_page->pos += amsdu_pad;
386 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
387 data_left)) & 0x3;
388 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
389 hdr_page->pos += ETH_ALEN;
390 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
391 hdr_page->pos += ETH_ALEN;
392
393 length = snap_ip_tcp_hdrlen + data_left;
394 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
395 hdr_page->pos += sizeof(length);
396
397 /*
398 * This will copy the SNAP as well which will be considered
399 * as MAC header.
400 */
401 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
402
403 hdr_page->pos += snap_ip_tcp_hdrlen;
404
405 tb_len = hdr_page->pos - start_hdr;
406 tb_phys = dma_map_single(trans->dev, start_hdr,
407 tb_len, DMA_TO_DEVICE);
408 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
409 goto out_err;
410 /*
411 * No need for _with_wa, this is from the TSO page and
412 * we leave some space at the end of it so can't hit
413 * the buggy scenario.
414 */
415 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
416 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
417 tb_phys, tb_len);
418 /* add this subframe's headers' length to the tx_cmd */
419 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
420
421 /* prepare the start_hdr for the next subframe */
422 start_hdr = hdr_page->pos;
423
424 /* put the payload */
425 while (data_left) {
426 int ret;
427
428 tb_len = min_t(unsigned int, tso.size, data_left);
429 tb_phys = dma_map_single(trans->dev, tso.data,
430 tb_len, DMA_TO_DEVICE);
431 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
432 tb_phys, tso.data,
433 tb_len, NULL);
434 if (ret)
435 goto out_err;
436
437 data_left -= tb_len;
438 tso_build_data(skb, &tso, tb_len);
439 }
440 }
441
442 /* re-add the WiFi header */
443 skb_push(skb, hdr_len);
444
445 return 0;
446
447 out_err:
448 #endif
449 return -EINVAL;
450 }
451
452 static struct
453 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
454 struct iwl_txq *txq,
455 struct iwl_device_tx_cmd *dev_cmd,
456 struct sk_buff *skb,
457 struct iwl_cmd_meta *out_meta,
458 int hdr_len,
459 int tx_cmd_len)
460 {
461 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
462 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
463 dma_addr_t tb_phys;
464 int len;
465 void *tb1_addr;
466
467 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
468
469 /*
470 * No need for _with_wa, the first TB allocation is aligned up
471 * to a 64-byte boundary and thus can't be at the end or cross
472 * a page boundary (much less a 2^32 boundary).
473 */
474 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
475
476 /*
477 * The second TB (tb1) points to the remainder of the TX command
478 * and the 802.11 header - dword aligned size
479 * (This calculation modifies the TX command, so do it before the
480 * setup of the first TB)
481 */
482 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
483 IWL_FIRST_TB_SIZE;
484
485 /* do not align A-MSDU to dword as the subframe header aligns it */
486
487 /* map the data for TB1 */
488 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
489 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
490 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
491 goto out_err;
492 /*
493 * No need for _with_wa(), we ensure (via alignment) that the data
494 * here can never cross or end at a page boundary.
495 */
496 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
497
498 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
499 hdr_len, dev_cmd))
500 goto out_err;
501
502 /* building the A-MSDU might have changed this data, memcpy it now */
503 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
504 return tfd;
505
506 out_err:
507 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
508 return NULL;
509 }
510
511 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
512 struct sk_buff *skb,
513 struct iwl_tfh_tfd *tfd,
514 struct iwl_cmd_meta *out_meta)
515 {
516 int i;
517
518 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
519 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
520 dma_addr_t tb_phys;
521 unsigned int fragsz = skb_frag_size(frag);
522 int ret;
523
524 if (!fragsz)
525 continue;
526
527 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
528 fragsz, DMA_TO_DEVICE);
529 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
530 skb_frag_address(frag),
531 fragsz, out_meta);
532 if (ret)
533 return ret;
534 }
535
536 return 0;
537 }
538
539 static struct
540 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
541 struct iwl_txq *txq,
542 struct iwl_device_tx_cmd *dev_cmd,
543 struct sk_buff *skb,
544 struct iwl_cmd_meta *out_meta,
545 int hdr_len,
546 int tx_cmd_len,
547 bool pad)
548 {
549 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
550 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
551 dma_addr_t tb_phys;
552 int len, tb1_len, tb2_len;
553 void *tb1_addr;
554 struct sk_buff *frag;
555
556 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
557
558 /* The first TB points to bi-directional DMA data */
559 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
560
561 /*
562 * No need for _with_wa, the first TB allocation is aligned up
563 * to a 64-byte boundary and thus can't be at the end or cross
564 * a page boundary (much less a 2^32 boundary).
565 */
566 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
567
568 /*
569 * The second TB (tb1) points to the remainder of the TX command
570 * and the 802.11 header - dword aligned size
571 * (This calculation modifies the TX command, so do it before the
572 * setup of the first TB)
573 */
574 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
575 IWL_FIRST_TB_SIZE;
576
577 if (pad)
578 tb1_len = ALIGN(len, 4);
579 else
580 tb1_len = len;
581
582 /* map the data for TB1 */
583 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
584 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
585 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
586 goto out_err;
587 /*
588 * No need for _with_wa(), we ensure (via alignment) that the data
589 * here can never cross or end at a page boundary.
590 */
591 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
592 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
593 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
594
595 /* set up TFD's third entry to point to remainder of skb's head */
596 tb2_len = skb_headlen(skb) - hdr_len;
597
598 if (tb2_len > 0) {
599 int ret;
600
601 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
602 tb2_len, DMA_TO_DEVICE);
603 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
604 skb->data + hdr_len, tb2_len,
605 NULL);
606 if (ret)
607 goto out_err;
608 }
609
610 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
611 goto out_err;
612
613 skb_walk_frags(skb, frag) {
614 int ret;
615
616 tb_phys = dma_map_single(trans->dev, frag->data,
617 skb_headlen(frag), DMA_TO_DEVICE);
618 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
619 frag->data,
620 skb_headlen(frag), NULL);
621 if (ret)
622 goto out_err;
623 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
624 goto out_err;
625 }
626
627 return tfd;
628
629 out_err:
630 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
631 return NULL;
632 }
633
634 static
635 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
636 struct iwl_txq *txq,
637 struct iwl_device_tx_cmd *dev_cmd,
638 struct sk_buff *skb,
639 struct iwl_cmd_meta *out_meta)
640 {
641 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
642 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
643 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
644 int len, hdr_len;
645 bool amsdu;
646
647 /* There must be data left over for TB1 or this code must be changed */
648 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
649
650 memset(tfd, 0, sizeof(*tfd));
651
652 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
653 len = sizeof(struct iwl_tx_cmd_gen2);
654 else
655 len = sizeof(struct iwl_tx_cmd_gen3);
656
657 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
658 (*ieee80211_get_qos_ctl(hdr) &
659 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
660
661 hdr_len = ieee80211_hdrlen(hdr->frame_control);
662
663 /*
664 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
665 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
666 * built in the higher layers already.
667 */
668 if (amsdu && skb_shinfo(skb)->gso_size)
669 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
670 out_meta, hdr_len, len);
671 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
672 hdr_len, len, !amsdu);
673 }
674
675 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
676 {
677 unsigned int max;
678 unsigned int used;
679
680 /*
681 * To avoid ambiguity between empty and completely full queues, there
682 * should always be less than max_tfd_queue_size elements in the queue.
683 * If q->n_window is smaller than max_tfd_queue_size, there is no need
684 * to reserve any queue entries for this purpose.
685 */
686 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
687 max = q->n_window;
688 else
689 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
690
691 /*
692 * max_tfd_queue_size is a power of 2, so the following is equivalent to
693 * modulo by max_tfd_queue_size and is well defined.
694 */
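/*
 * Illustrative example (not in the original source): with
 * max_tfd_queue_size = 256, write_ptr = 3 and read_ptr = 250,
 * used = (3 - 250) & 255 = 9 entries currently in flight.
 */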
695 used = (q->write_ptr - q->read_ptr) &
696 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
697
698 if (WARN_ON(used > max))
699 return 0;
700
701 return max - used;
702 }
703
704 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
705 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
706 {
707 struct iwl_cmd_meta *out_meta;
708 struct iwl_txq *txq = trans->txqs.txq[txq_id];
709 u16 cmd_len;
710 int idx;
711 void *tfd;
712
713 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
714 "queue %d out of range", txq_id))
715 return -EINVAL;
716
717 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
718 "TX on unused queue %d\n", txq_id))
719 return -EINVAL;
720
721 if (skb_is_nonlinear(skb) &&
722 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
723 __skb_linearize(skb))
724 return -ENOMEM;
725
726 spin_lock(&txq->lock);
727
728 if (iwl_txq_space(trans, txq) < txq->high_mark) {
729 iwl_txq_stop(trans, txq);
730
731 /* don't put the packet on the ring, if there is no room */
732 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
733 struct iwl_device_tx_cmd **dev_cmd_ptr;
734
735 dev_cmd_ptr = (void *)((u8 *)skb->cb +
736 trans->txqs.dev_cmd_offs);
737
738 *dev_cmd_ptr = dev_cmd;
739 __skb_queue_tail(&txq->overflow_q, skb);
740 spin_unlock(&txq->lock);
741 return 0;
742 }
743 }
744
745 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
746
747 /* Set up driver data for this TFD */
748 txq->entries[idx].skb = skb;
749 txq->entries[idx].cmd = dev_cmd;
750
751 dev_cmd->hdr.sequence =
752 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
753 INDEX_TO_SEQ(idx)));
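/* the sequence field carries both the queue id and the TFD index */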
754
755 /* Set up first empty entry in queue's array of Tx/cmd buffers */
756 out_meta = &txq->entries[idx].meta;
757 out_meta->flags = 0;
758
759 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
760 if (!tfd) {
761 spin_unlock(&txq->lock);
762 return -1;
763 }
764
765 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
766 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
767 (void *)dev_cmd->payload;
768
769 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
770 } else {
771 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
772 (void *)dev_cmd->payload;
773
774 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
775 }
776
777 /* Set up entry for this TFD in Tx byte-count array */
778 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
779 iwl_txq_gen2_get_num_tbs(trans, tfd));
780
781 /* start timer if queue currently empty */
782 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
783 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
784
785 /* Tell device the write index *just past* this latest filled TFD */
786 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
787 iwl_txq_inc_wr_ptr(trans, txq);
788 /*
789 * At this point the frame is "transmitted" successfully
790 * and we will get a TX status notification eventually.
791 */
792 spin_unlock(&txq->lock);
793 return 0;
794 }
795
796 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
797
798 /*
799 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
800 */
801 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
802 {
803 struct iwl_txq *txq = trans->txqs.txq[txq_id];
804
805 spin_lock_bh(&txq->lock);
806 while (txq->write_ptr != txq->read_ptr) {
807 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
808 txq_id, txq->read_ptr);
809
810 if (txq_id != trans->txqs.cmd.q_id) {
811 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
812 struct sk_buff *skb = txq->entries[idx].skb;
813
814 if (!WARN_ON_ONCE(!skb))
815 iwl_txq_free_tso_page(trans, skb);
816 }
817 iwl_txq_gen2_free_tfd(trans, txq);
818 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
819 }
820
821 while (!skb_queue_empty(&txq->overflow_q)) {
822 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
823
824 iwl_op_mode_free_skb(trans->op_mode, skb);
825 }
826
827 spin_unlock_bh(&txq->lock);
828
829 /* just in case - this queue may have been stopped */
830 iwl_wake_queue(trans, txq);
831 }
832
833 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
834 struct iwl_txq *txq)
835 {
836 struct device *dev = trans->dev;
837
838 /* De-alloc circular buffer of TFDs */
839 if (txq->tfds) {
840 dma_free_coherent(dev,
841 trans->txqs.tfd.size * txq->n_window,
842 txq->tfds, txq->dma_addr);
843 dma_free_coherent(dev,
844 sizeof(*txq->first_tb_bufs) * txq->n_window,
845 txq->first_tb_bufs, txq->first_tb_dma);
846 }
847
848 kfree(txq->entries);
849 if (txq->bc_tbl.addr)
850 dma_pool_free(trans->txqs.bc_pool,
851 txq->bc_tbl.addr, txq->bc_tbl.dma);
852 kfree(txq);
853 }
854
855 /*
856 * iwl_pcie_txq_free - Deallocate DMA queue.
857 * @txq: Transmit queue to deallocate.
858 *
859 * Empty queue by removing and destroying all BD's.
860 * Free all buffers.
861 * 0-fill, but do not free "txq" descriptor structure.
862 */
863 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
864 {
865 struct iwl_txq *txq;
866 int i;
867
868 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
869 "queue %d out of range", txq_id))
870 return;
871
872 txq = trans->txqs.txq[txq_id];
873
874 if (WARN_ON(!txq))
875 return;
876
877 iwl_txq_gen2_unmap(trans, txq_id);
878
879 /* De-alloc array of command/tx buffers */
880 if (txq_id == trans->txqs.cmd.q_id)
881 for (i = 0; i < txq->n_window; i++) {
882 kfree_sensitive(txq->entries[i].cmd);
883 kfree_sensitive(txq->entries[i].free_buf);
884 }
885 del_timer_sync(&txq->stuck_timer);
886
887 iwl_txq_gen2_free_memory(trans, txq);
888
889 trans->txqs.txq[txq_id] = NULL;
890
891 clear_bit(txq_id, trans->txqs.queue_used);
892 }
893
894 /*
895 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
896 */
897 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
898 {
899 q->n_window = slots_num;
900
901 /* slots_num must be power-of-two size, otherwise
902 * iwl_txq_get_cmd_index is broken. */
903 if (WARN_ON(!is_power_of_2(slots_num)))
904 return -EINVAL;
905
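/*
 * Watermarks used elsewhere in this file: the queue is stopped when free
 * space drops below high_mark (iwl_txq_gen2_tx()) and woken again once
 * more than low_mark entries are free (iwl_txq_reclaim()).
 */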
906 q->low_mark = q->n_window / 4;
907 if (q->low_mark < 4)
908 q->low_mark = 4;
909
910 q->high_mark = q->n_window / 8;
911 if (q->high_mark < 2)
912 q->high_mark = 2;
913
914 q->write_ptr = 0;
915 q->read_ptr = 0;
916
917 return 0;
918 }
919
920 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
921 bool cmd_queue)
922 {
923 int ret;
924 u32 tfd_queue_max_size =
925 trans->trans_cfg->base_params->max_tfd_queue_size;
926
927 txq->need_update = false;
928
929 /* max_tfd_queue_size must be power-of-two size, otherwise
930 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
931 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
932 "Max tfd queue size must be a power of two, but is %d",
933 tfd_queue_max_size))
934 return -EINVAL;
935
936 /* Initialize queue's high/low-water marks, and head/tail indexes */
937 ret = iwl_queue_init(txq, slots_num);
938 if (ret)
939 return ret;
940
941 spin_lock_init(&txq->lock);
942
943 if (cmd_queue) {
944 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
945
946 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
947 }
948
949 __skb_queue_head_init(&txq->overflow_q);
950
951 return 0;
952 }
953
954 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
955 {
956 struct page **page_ptr;
957 struct page *next;
958
959 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
960 next = *page_ptr;
961 *page_ptr = NULL;
962
963 while (next) {
964 struct page *tmp = next;
965
966 next = *(void **)(page_address(next) + PAGE_SIZE -
967 sizeof(void *));
968 __free_page(tmp);
969 }
970 }
971
972 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
973 {
974 u32 txq_id = txq->id;
975 u32 status;
976 bool active;
977 u8 fifo;
978
979 if (trans->trans_cfg->use_tfh) {
980 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
981 txq->read_ptr, txq->write_ptr);
982 /* TODO: access new SCD registers and dump them */
983 return;
984 }
985
986 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
987 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
988 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
989
990 IWL_ERR(trans,
991 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
992 txq_id, active ? "" : "in", fifo,
993 jiffies_to_msecs(txq->wd_timeout),
994 txq->read_ptr, txq->write_ptr,
995 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
996 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
997 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
998 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
999 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1000 }
1001
1002 static void iwl_txq_stuck_timer(struct timer_list *t)
1003 {
1004 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1005 struct iwl_trans *trans = txq->trans;
1006
1007 spin_lock(&txq->lock);
1008 /* check if triggered erroneously */
1009 if (txq->read_ptr == txq->write_ptr) {
1010 spin_unlock(&txq->lock);
1011 return;
1012 }
1013 spin_unlock(&txq->lock);
1014
1015 iwl_txq_log_scd_error(trans, txq);
1016
1017 iwl_force_nmi(trans);
1018 }
1019
1020 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1021 bool cmd_queue)
1022 {
1023 size_t tfd_sz = trans->txqs.tfd.size *
1024 trans->trans_cfg->base_params->max_tfd_queue_size;
1025 size_t tb0_buf_sz;
1026 int i;
1027
1028 if (WARN_ON(txq->entries || txq->tfds))
1029 return -EINVAL;
1030
1031 if (trans->trans_cfg->use_tfh)
1032 tfd_sz = trans->txqs.tfd.size * slots_num;
1033
1034 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1035 txq->trans = trans;
1036
1037 txq->n_window = slots_num;
1038
1039 txq->entries = kcalloc(slots_num,
1040 sizeof(struct iwl_pcie_txq_entry),
1041 GFP_KERNEL);
1042
1043 if (!txq->entries)
1044 goto error;
1045
1046 if (cmd_queue)
1047 for (i = 0; i < slots_num; i++) {
1048 txq->entries[i].cmd =
1049 kmalloc(sizeof(struct iwl_device_cmd),
1050 GFP_KERNEL);
1051 if (!txq->entries[i].cmd)
1052 goto error;
1053 }
1054
1055 /* Circular buffer of transmit frame descriptors (TFDs),
1056 * shared with device */
1057 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1058 &txq->dma_addr, GFP_KERNEL);
1059 if (!txq->tfds)
1060 goto error;
1061
1062 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1063
1064 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1065
1066 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1067 &txq->first_tb_dma,
1068 GFP_KERNEL);
1069 if (!txq->first_tb_bufs)
1070 goto err_free_tfds;
1071
1072 return 0;
1073 err_free_tfds:
1074 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1075 error:
1076 if (txq->entries && cmd_queue)
1077 for (i = 0; i < slots_num; i++)
1078 kfree(txq->entries[i].cmd);
1079 kfree(txq->entries);
1080 txq->entries = NULL;
1081
1082 return -ENOMEM;
1083 }
1084
1085 static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
1086 struct iwl_txq **intxq, int size,
1087 unsigned int timeout)
1088 {
1089 size_t bc_tbl_size, bc_tbl_entries;
1090 struct iwl_txq *txq;
1091 int ret;
1092
1093 WARN_ON(!trans->txqs.bc_tbl_size);
1094
1095 bc_tbl_size = trans->txqs.bc_tbl_size;
1096 bc_tbl_entries = bc_tbl_size / sizeof(u16);
1097
1098 if (WARN_ON(size > bc_tbl_entries))
1099 return -EINVAL;
1100
1101 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1102 if (!txq)
1103 return -ENOMEM;
1104
1105 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1106 &txq->bc_tbl.dma);
1107 if (!txq->bc_tbl.addr) {
1108 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1109 kfree(txq);
1110 return -ENOMEM;
1111 }
1112
1113 ret = iwl_txq_alloc(trans, txq, size, false);
1114 if (ret) {
1115 IWL_ERR(trans, "Tx queue alloc failed\n");
1116 goto error;
1117 }
1118 ret = iwl_txq_init(trans, txq, size, false);
1119 if (ret) {
1120 IWL_ERR(trans, "Tx queue init failed\n");
1121 goto error;
1122 }
1123
1124 txq->wd_timeout = msecs_to_jiffies(timeout);
1125
1126 *intxq = txq;
1127 return 0;
1128
1129 error:
1130 iwl_txq_gen2_free_memory(trans, txq);
1131 return ret;
1132 }
1133
1134 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1135 struct iwl_host_cmd *hcmd)
1136 {
1137 struct iwl_tx_queue_cfg_rsp *rsp;
1138 int ret, qid;
1139 u32 wr_ptr;
1140
1141 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1142 sizeof(*rsp))) {
1143 ret = -EINVAL;
1144 goto error_free_resp;
1145 }
1146
1147 rsp = (void *)hcmd->resp_pkt->data;
1148 qid = le16_to_cpu(rsp->queue_number);
1149 wr_ptr = le16_to_cpu(rsp->write_pointer);
1150
1151 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1152 WARN_ONCE(1, "queue index %d unsupported", qid);
1153 ret = -EIO;
1154 goto error_free_resp;
1155 }
1156
1157 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1158 WARN_ONCE(1, "queue %d already used", qid);
1159 ret = -EIO;
1160 goto error_free_resp;
1161 }
1162
1163 if (WARN_ONCE(trans->txqs.txq[qid],
1164 "queue %d already allocated\n", qid)) {
1165 ret = -EIO;
1166 goto error_free_resp;
1167 }
1168
1169 txq->id = qid;
1170 trans->txqs.txq[qid] = txq;
1171 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1172
1173 /* Place first TFD at index corresponding to start sequence number */
1174 txq->read_ptr = wr_ptr;
1175 txq->write_ptr = wr_ptr;
1176
1177 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1178
1179 iwl_free_resp(hcmd);
1180 return qid;
1181
1182 error_free_resp:
1183 iwl_free_resp(hcmd);
1184 iwl_txq_gen2_free_memory(trans, txq);
1185 return ret;
1186 }
1187
1188 int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
1189 int cmd_id, int size, unsigned int timeout)
1190 {
1191 struct iwl_txq *txq = NULL;
1192 struct iwl_tx_queue_cfg_cmd cmd = {
1193 .flags = flags,
1194 .sta_id = sta_id,
1195 .tid = tid,
1196 };
1197 struct iwl_host_cmd hcmd = {
1198 .id = cmd_id,
1199 .len = { sizeof(cmd) },
1200 .data = { &cmd, },
1201 .flags = CMD_WANT_SKB,
1202 };
1203 int ret;
1204
1205 ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
1206 if (ret)
1207 return ret;
1208
1209 cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
1210 cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1211 cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1212
1213 ret = iwl_trans_send_cmd(trans, &hcmd);
1214 if (ret)
1215 goto error;
1216
1217 return iwl_txq_alloc_response(trans, txq, &hcmd);
1218
1219 error:
1220 iwl_txq_gen2_free_memory(trans, txq);
1221 return ret;
1222 }
1223
1224 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1225 {
1226 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1227 "queue %d out of range", queue))
1228 return;
1229
1230 /*
1231 * Upon HW Rfkill - we stop the device, and then stop the queues
1232 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1233 * allow the op_mode to call txq_disable after it already called
1234 * stop_device.
1235 */
1236 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1237 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1238 "queue %d not used", queue);
1239 return;
1240 }
1241
1242 iwl_txq_gen2_free(trans, queue);
1243
1244 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1245 }
1246
1247 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1248 {
1249 int i;
1250
1251 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1252
1253 /* Free all TX queues */
1254 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1255 if (!trans->txqs.txq[i])
1256 continue;
1257
1258 iwl_txq_gen2_free(trans, i);
1259 }
1260 }
1261
1262 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1263 {
1264 struct iwl_txq *queue;
1265 int ret;
1266
1267 /* alloc and init the tx queue */
1268 if (!trans->txqs.txq[txq_id]) {
1269 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1270 if (!queue) {
1271 IWL_ERR(trans, "Not enough memory for tx queue\n");
1272 return -ENOMEM;
1273 }
1274 trans->txqs.txq[txq_id] = queue;
1275 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1276 if (ret) {
1277 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1278 goto error;
1279 }
1280 } else {
1281 queue = trans->txqs.txq[txq_id];
1282 }
1283
1284 ret = iwl_txq_init(trans, queue, queue_size,
1285 (txq_id == trans->txqs.cmd.q_id));
1286 if (ret) {
1287 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1288 goto error;
1289 }
1290 trans->txqs.txq[txq_id]->id = txq_id;
1291 set_bit(txq_id, trans->txqs.queue_used);
1292
1293 return 0;
1294
1295 error:
1296 iwl_txq_gen2_tx_free(trans);
1297 return ret;
1298 }
1299
1300 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1301 void *_tfd, u8 idx)
1302 {
1303 struct iwl_tfd *tfd;
1304 struct iwl_tfd_tb *tb;
1305 dma_addr_t addr;
1306 dma_addr_t hi_len;
1307
1308 if (trans->trans_cfg->use_tfh) {
1309 struct iwl_tfh_tfd *tfd = _tfd;
1310 struct iwl_tfh_tb *tb = &tfd->tbs[idx];
1311
1312 return (dma_addr_t)(le64_to_cpu(tb->addr));
1313 }
1314
1315 tfd = _tfd;
1316 tb = &tfd->tbs[idx];
1317 addr = get_unaligned_le32(&tb->lo);
1318
1319 if (sizeof(dma_addr_t) <= sizeof(u32))
1320 return addr;
1321
1322 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
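/* legacy TFDs use 36-bit addresses: 32 low bits plus 4 extra bits in hi_n_len */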
1323
1324 /*
1325 * shift by 16 twice to avoid warnings on 32-bit
1326 * (where this code never runs anyway due to the
1327 * if statement above)
1328 */
1329 return addr | ((hi_len << 16) << 16);
1330 }
1331
1332 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1333 struct iwl_cmd_meta *meta,
1334 struct iwl_txq *txq, int index)
1335 {
1336 int i, num_tbs;
1337 void *tfd = iwl_txq_get_tfd(trans, txq, index);
1338
1339 /* Sanity check on number of chunks */
1340 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1341
1342 if (num_tbs > trans->txqs.tfd.max_tbs) {
1343 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1344 /* @todo issue fatal error, it is quite a serious situation */
1345 return;
1346 }
1347
1348 /* first TB is never freed - it's the bidirectional DMA data */
1349
1350 for (i = 1; i < num_tbs; i++) {
1351 if (meta->tbs & BIT(i))
1352 dma_unmap_page(trans->dev,
1353 iwl_txq_gen1_tfd_tb_get_addr(trans,
1354 tfd, i),
1355 iwl_txq_gen1_tfd_tb_get_len(trans,
1356 tfd, i),
1357 DMA_TO_DEVICE);
1358 else
1359 dma_unmap_single(trans->dev,
1360 iwl_txq_gen1_tfd_tb_get_addr(trans,
1361 tfd, i),
1362 iwl_txq_gen1_tfd_tb_get_len(trans,
1363 tfd, i),
1364 DMA_TO_DEVICE);
1365 }
1366
1367 meta->tbs = 0;
1368
1369 if (trans->trans_cfg->use_tfh) {
1370 struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
1371
1372 tfd_fh->num_tbs = 0;
1373 } else {
1374 struct iwl_tfd *tfd_fh = (void *)tfd;
1375
1376 tfd_fh->num_tbs = 0;
1377 }
1378 }
1379
1380 #define IWL_TX_CRC_SIZE 4
1381 #define IWL_TX_DELIMITER_SIZE 4
1382
1383 /*
1384 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1385 */
1386 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1387 struct iwl_txq *txq, u16 byte_cnt,
1388 int num_tbs)
1389 {
1390 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1391 int write_ptr = txq->write_ptr;
1392 int txq_id = txq->id;
1393 u8 sec_ctl = 0;
1394 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1395 __le16 bc_ent;
1396 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1397 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1398 u8 sta_id = tx_cmd->sta_id;
1399
1400 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1401
1402 sec_ctl = tx_cmd->sec_ctl;
1403
1404 switch (sec_ctl & TX_CMD_SEC_MSK) {
1405 case TX_CMD_SEC_CCM:
1406 len += IEEE80211_CCMP_MIC_LEN;
1407 break;
1408 case TX_CMD_SEC_TKIP:
1409 len += IEEE80211_TKIP_ICV_LEN;
1410 break;
1411 case TX_CMD_SEC_WEP:
1412 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1413 break;
1414 }
1415 if (trans->txqs.bc_table_dword)
1416 len = DIV_ROUND_UP(len, 4);
1417
1418 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1419 return;
1420
1421 bc_ent = cpu_to_le16(len | (sta_id << 12));
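/* gen1 entry layout: length in bits 0-11, station id in bits 12-15 */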
1422
1423 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1424
1425 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1426 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1427 bc_ent;
1428 }
1429
1430 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1431 struct iwl_txq *txq)
1432 {
1433 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1434 int txq_id = txq->id;
1435 int read_ptr = txq->read_ptr;
1436 u8 sta_id = 0;
1437 __le16 bc_ent;
1438 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1439 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1440
1441 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1442
1443 if (txq_id != trans->txqs.cmd.q_id)
1444 sta_id = tx_cmd->sta_id;
1445
1446 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1447
1448 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1449
1450 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1451 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1452 bc_ent;
1453 }
1454
1455 /*
1456 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
1457 * @trans - transport private data
1458 * @txq - tx queue
1460 *
1461 * Does NOT advance any TFD circular buffer read/write indexes
1462 * Does NOT free the TFD itself (which is within circular buffer)
1463 */
1464 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1465 {
1466 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1467 * idx is bounded by n_window
1468 */
1469 int rd_ptr = txq->read_ptr;
1470 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1471 struct sk_buff *skb;
1472
1473 lockdep_assert_held(&txq->lock);
1474
1475 if (!txq->entries)
1476 return;
1477
1478 /* We have only q->n_window txq->entries, but we use
1479 * TFD_QUEUE_SIZE_MAX tfds
1480 */
1481 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
1482
1483 /* free SKB */
1484 skb = txq->entries[idx].skb;
1485
1486 /* Can be called from irqs-disabled context
1487 * If skb is not NULL, it means that the whole queue is being
1488 * freed and that the queue is not empty - free the skb
1489 */
1490 if (skb) {
1491 iwl_op_mode_free_skb(trans->op_mode, skb);
1492 txq->entries[idx].skb = NULL;
1493 }
1494 }
1495
1496 void iwl_txq_progress(struct iwl_txq *txq)
1497 {
1498 lockdep_assert_held(&txq->lock);
1499
1500 if (!txq->wd_timeout)
1501 return;
1502
1503 /*
1504 * station is asleep and we send data - that must
1505 * be uAPSD or PS-Poll. Don't rearm the timer.
1506 */
1507 if (txq->frozen)
1508 return;
1509
1510 /*
1511 * if empty delete timer, otherwise move timer forward
1512 * since we're making progress on this queue
1513 */
1514 if (txq->read_ptr == txq->write_ptr)
1515 del_timer(&txq->stuck_timer);
1516 else
1517 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1518 }
1519
1520 /* Frees buffers until index _not_ inclusive */
1521 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1522 struct sk_buff_head *skbs)
1523 {
1524 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1525 int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1526 int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1527 int last_to_free;
1528
1529 /* This function is not meant to release the cmd queue */
1530 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1531 return;
1532
1533 spin_lock_bh(&txq->lock);
1534
1535 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1536 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1537 txq_id, ssn);
1538 goto out;
1539 }
1540
1541 if (read_ptr == tfd_num)
1542 goto out;
1543
1544 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1545 txq_id, txq->read_ptr, tfd_num, ssn);
1546
1547 /*Since we free until index _not_ inclusive, the one before index is
1548 * the last we will free. This one must be used */
1549 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1550
1551 if (!iwl_txq_used(txq, last_to_free)) {
1552 IWL_ERR(trans,
1553 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1554 __func__, txq_id, last_to_free,
1555 trans->trans_cfg->base_params->max_tfd_queue_size,
1556 txq->write_ptr, txq->read_ptr);
1557
1558 iwl_op_mode_time_point(trans->op_mode,
1559 IWL_FW_INI_TIME_POINT_FAKE_TX,
1560 NULL);
1561 goto out;
1562 }
1563
1564 if (WARN_ON(!skb_queue_empty(skbs)))
1565 goto out;
1566
1567 for (;
1568 read_ptr != tfd_num;
1569 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1570 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1571 struct sk_buff *skb = txq->entries[read_ptr].skb;
1572
1573 if (WARN_ON_ONCE(!skb))
1574 continue;
1575
1576 iwl_txq_free_tso_page(trans, skb);
1577
1578 __skb_queue_tail(skbs, skb);
1579
1580 txq->entries[read_ptr].skb = NULL;
1581
1582 if (!trans->trans_cfg->use_tfh)
1583 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1584
1585 iwl_txq_free_tfd(trans, txq);
1586 }
1587
1588 iwl_txq_progress(txq);
1589
1590 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1591 test_bit(txq_id, trans->txqs.queue_stopped)) {
1592 struct sk_buff_head overflow_skbs;
1593
1594 __skb_queue_head_init(&overflow_skbs);
1595 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1596
1597 /*
1598 * We are going to transmit from the overflow queue.
1599 * Remember this state so that wait_for_txq_empty will know we
1600 * are adding more packets to the TFD queue. It cannot rely on
1601 * the state of &txq->overflow_q, as we just emptied it, but
1602 * haven't TXed the content yet.
1603 */
1604 txq->overflow_tx = true;
1605
1606 /*
1607 * This is tricky: we are in the reclaim path, which is not
1608 * re-entrant, so no one will try to access the txq data
1609 * from that path. We stopped tx, so we can't
1610 * have tx as well. Bottom line, we can unlock and re-lock
1611 * later.
1612 */
1613 spin_unlock_bh(&txq->lock);
1614
1615 while (!skb_queue_empty(&overflow_skbs)) {
1616 struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1617 struct iwl_device_tx_cmd *dev_cmd_ptr;
1618
1619 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1620 trans->txqs.dev_cmd_offs);
1621
1622 /*
1623 * Note that we can very well be overflowing again.
1624 * In that case, iwl_txq_space will be small again
1625 * and we won't wake mac80211's queue.
1626 */
1627 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1628 }
1629
1630 if (iwl_txq_space(trans, txq) > txq->low_mark)
1631 iwl_wake_queue(trans, txq);
1632
1633 spin_lock_bh(&txq->lock);
1634 txq->overflow_tx = false;
1635 }
1636
1637 out:
1638 spin_unlock_bh(&txq->lock);
1639 }
1640
1641 /* Set wr_ptr of specific device and txq */
1642 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1643 {
1644 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1645
1646 spin_lock_bh(&txq->lock);
1647
1648 txq->write_ptr = ptr;
1649 txq->read_ptr = txq->write_ptr;
1650
1651 spin_unlock_bh(&txq->lock);
1652 }
1653
1654 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1655 bool freeze)
1656 {
1657 int queue;
1658
1659 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1660 struct iwl_txq *txq = trans->txqs.txq[queue];
1661 unsigned long now;
1662
1663 spin_lock_bh(&txq->lock);
1664
1665 now = jiffies;
1666
1667 if (txq->frozen == freeze)
1668 goto next_queue;
1669
1670 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1671 freeze ? "Freezing" : "Waking", queue);
1672
1673 txq->frozen = freeze;
1674
1675 if (txq->read_ptr == txq->write_ptr)
1676 goto next_queue;
1677
1678 if (freeze) {
1679 if (unlikely(time_after(now,
1680 txq->stuck_timer.expires))) {
1681 /*
1682 * The timer should have fired, maybe it is
1683 * spinning right now on the lock.
1684 */
1685 goto next_queue;
1686 }
1687 /* remember how long until the timer fires */
1688 txq->frozen_expiry_remainder =
1689 txq->stuck_timer.expires - now;
1690 del_timer(&txq->stuck_timer);
1691 goto next_queue;
1692 }
1693
1694 /*
1695 * Wake a non-empty queue -> arm timer with the
1696 * remainder before it froze
1697 */
1698 mod_timer(&txq->stuck_timer,
1699 now + txq->frozen_expiry_remainder);
1700
1701 next_queue:
1702 spin_unlock_bh(&txq->lock);
1703 }
1704 }
1705
1706 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
1707
1708 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1709 struct iwl_host_cmd *cmd)
1710 {
1711 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1712 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1713 int cmd_idx;
1714 int ret;
1715
1716 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1717
1718 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1719 &trans->status),
1720 "Command %s: a command is already active!\n", cmd_str))
1721 return -EIO;
1722
1723 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1724
1725 cmd_idx = trans->ops->send_cmd(trans, cmd);
1726 if (cmd_idx < 0) {
1727 ret = cmd_idx;
1728 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1729 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1730 cmd_str, ret);
1731 return ret;
1732 }
1733
1734 ret = wait_event_timeout(trans->wait_command_queue,
1735 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1736 &trans->status),
1737 HOST_COMPLETE_TIMEOUT);
1738 if (!ret) {
1739 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1740 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1741
1742 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1743 txq->read_ptr, txq->write_ptr);
1744
1745 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1746 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1747 cmd_str);
1748 ret = -ETIMEDOUT;
1749
1750 iwl_trans_sync_nmi(trans);
1751 goto cancel;
1752 }
1753
1754 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1755 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1756 dump_stack();
1757 ret = -EIO;
1758 goto cancel;
1759 }
1760
1761 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1762 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1763 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1764 ret = -ERFKILL;
1765 goto cancel;
1766 }
1767
1768 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1769 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1770 ret = -EIO;
1771 goto cancel;
1772 }
1773
1774 return 0;
1775
1776 cancel:
1777 if (cmd->flags & CMD_WANT_SKB) {
1778 /*
1779 * Cancel the CMD_WANT_SKB flag for the cmd in the
1780 * TX cmd queue. Otherwise in case the cmd comes
1781 * in later, it will possibly set an invalid
1782 * address (cmd->meta.source).
1783 */
1784 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1785 }
1786
1787 if (cmd->resp_pkt) {
1788 iwl_free_resp(cmd);
1789 cmd->resp_pkt = NULL;
1790 }
1791
1792 return ret;
1793 }
1794
1795 int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1796 struct iwl_host_cmd *cmd)
1797 {
1798 /* Make sure the NIC is still alive in the bus */
1799 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1800 return -ENODEV;
1801
1802 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1803 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1804 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1805 cmd->id);
1806 return -ERFKILL;
1807 }
1808
1809 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1810 !(cmd->flags & CMD_SEND_IN_D3))) {
1811 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1812 return -EHOSTDOWN;
1813 }
1814
1815 if (cmd->flags & CMD_ASYNC) {
1816 int ret;
1817
1818 /* An asynchronous command can not expect an SKB to be set. */
1819 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1820 return -EINVAL;
1821
1822 ret = trans->ops->send_cmd(trans, cmd);
1823 if (ret < 0) {
1824 IWL_ERR(trans,
1825 "Error sending %s: enqueue_hcmd failed: %d\n",
1826 iwl_get_cmd_string(trans, cmd->id), ret);
1827 return ret;
1828 }
1829 return 0;
1830 }
1831
1832 return iwl_trans_txq_send_hcmd_sync(trans, cmd);
1833 }
1834
1835