// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
#ifdef CONFIG_INET
#include <net/tso.h>
#endif
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/commands.h"
#include "fw/api/tx.h"
#include "fw/api/datapath.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>
#if defined(__FreeBSD__)
#include <net/mac80211.h>
#endif

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}
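
/*
 * Worked example for the encoding above (illustrative values, not taken
 * from real traffic): with byte_cnt = 1000 and a TFD that fits in one
 * 64-byte fetch chunk (num_fetch_chunks = 0), an AX210+ device gets the
 * raw byte count, bc_ent = cpu_to_le16(1000 | (0 << 14)) = 0x03E8, while
 * an older device gets the dword count, DIV_ROUND_UP(1000, 4) = 250, so
 * bc_ent = cpu_to_le16(250 | (0 << 12)) = 0x00FA.
 */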

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	if (!txq->entries)
		return;

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/*
	 * Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer to the previous page if there */
	*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
	*page_ptr = ret;

	return ret;
}
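
/*
 * The chaining scheme above links workaround pages into a per-skb,
 * singly linked list: the last sizeof(void *) bytes of each page hold a
 * pointer to the previously attached page (or NULL), and skb->cb holds
 * the list head. iwl_txq_free_tso_page() later walks this list to free
 * every page attached to the skb.
 */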

/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_tfh_tfd *tfd,
				       dma_addr_t phys, void *virt,
				       u16 len, struct iwl_cmd_meta *meta)
{
	dma_addr_t oldphys = phys;
	struct page *page;
	int ret;

	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;

	if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
		ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

		if (ret < 0)
			goto unmap;

		if (meta)
			meta->tbs |= BIT(ret);

		ret = 0;
		goto trace;
	}

	/*
	 * Work around a hardware bug. If (as expressed in the
	 * condition above) the TB ends on a 32-bit boundary,
	 * then the next TB may be accessed with the wrong
	 * address.
	 * To work around it, copy the data elsewhere and make
	 * a new mapping for it so the device will not fail.
	 */

	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
		ret = -ENOBUFS;
		goto unmap;
	}

	page = get_workaround_page(trans, skb);
	if (!page) {
		ret = -ENOMEM;
		goto unmap;
	}

	memcpy(page_address(page), virt, len);

	phys = dma_map_single(trans->dev, page_address(page), len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;
	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
	if (ret < 0) {
		/* unmap the new allocation as single */
		oldphys = phys;
		meta = NULL;
		goto unmap;
	}
	IWL_WARN(trans,
		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
		 len, (unsigned long long)oldphys, (unsigned long long)phys);

	ret = 0;
unmap:
	if (meta)
		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

	return ret;
}
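
/*
 * Illustrative case for the workaround above (made-up addresses): a
 * mapping with phys = 0x1fffff000 and len = 0x2000 would end at
 * 0x200001000 and thus cross the 0x200000000 (2^32) boundary, so
 * iwl_txq_crosses_4g_boundary() is true and the buffer is bounced
 * through a freshly mapped workaround page instead.
 */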

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() in tx-gen2.c)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	get_page(p->page);
	return p;
}
#endif

static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
				    struct sk_buff *skb,
				    struct iwl_tfh_tfd *tfd, int start_len,
				    u8 hdr_len,
				    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		   (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */
		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					tb_phys, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			int ret;

			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
							  tb_phys, tso.data,
							  tb_len, NULL);
			if (ret)
				goto out_err;

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}
	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}
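
/*
 * For reference, each subframe assembled above lays out in the TSO
 * header page as (see the loop body in iwl_txq_gen2_build_amsdu()):
 *
 *	[pad to 4 bytes][DA (6)][SA (6)][length (2)][SNAP + IP + TCP hdr]
 *
 * followed by the separately mapped payload TBs for that subframe.
 */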

static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb_frag_address(frag),
						  fragsz, out_meta);
		if (ret)
			return ret;
	}

	return 0;
}

static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
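
/*
 * Summary of the TFD layout built above: TB0 holds the first
 * IWL_FIRST_TB_SIZE bytes of the device command (the bi-directional DMA
 * data), TB1 holds the rest of the TX command plus the 802.11 header,
 * TB2 (if present) covers the remainder of the skb's linear head, and
 * further TBs map the paged fragments and any chained frag skbs.
 */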

static
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
		     IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
		     IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
	       (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
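
/*
 * Worked example for the space calculation above (hypothetical values):
 * with max_tfd_queue_size = 256 and n_window = 64, max = 64; if
 * write_ptr = 10 and read_ptr = 250 then
 * used = (10 - 250) & 255 = 16, so 64 - 16 = 48 slots remain.
 */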

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (!WARN_ON_ONCE(!skb))
				iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}

/*
 * iwl_txq_gen2_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans->txqs.queue_used);
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
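
/*
 * Example of the resulting watermarks (illustrative): for
 * slots_num = 256, low_mark = 64 and high_mark = 32. TX is stopped via
 * iwl_txq_stop() when free space drops below high_mark, and the queue
 * is woken again once reclaim pushes free space back above low_mark.
 */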

int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

#ifdef CONFIG_LOCKDEP
	if (cmd_queue) {
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
	}
#endif

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->gen2) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
#if defined(__FreeBSD__)
		/*
		 * Dump some more queue and timer information to rule
		 * out LinuxKPI issues and gather some extra data.
		 */
		IWL_ERR(trans, " need_update %d frozen %d ampdu %d "
			"now %ju stuck_timer.expires %ju "
			"frozen_expiry_remainder %ju wd_timeout %ju\n",
			txq->need_update, txq->frozen, txq->ampdu,
			(uintmax_t)jiffies, (uintmax_t)txq->stuck_timer.expires,
			(uintmax_t)txq->frozen_expiry_remainder,
			(uintmax_t)txq->wd_timeout);
#endif
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

static void iwl_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_txq_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue)
{
	size_t tfd_sz = trans->txqs.tfd.size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
		return -EINVAL;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	if (trans->trans_cfg->gen2)
		tfd_sz = trans->txqs.tfd.size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
	txq->tfds = NULL;
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static struct iwl_txq *
iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
	size_t bc_tbl_size, bc_tbl_entries;
	struct iwl_txq *txq;
	int ret;

	WARN_ON(!trans->txqs.bc_tbl_size);

	bc_tbl_size = trans->txqs.bc_tbl_size;
	bc_tbl_entries = bc_tbl_size / sizeof(u16);

	if (WARN_ON(size > bc_tbl_entries))
		return ERR_PTR(-EINVAL);

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return ERR_PTR(-ENOMEM);

	txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
					  &txq->bc_tbl.dma);
	if (!txq->bc_tbl.addr) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return ERR_PTR(-ENOMEM);
	}

	ret = iwl_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	return txq;

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ERR_PTR(ret);
}

static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
				  struct iwl_host_cmd *hcmd)
{
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (WARN_ONCE(trans->txqs.txq[qid],
		      "queue %d already allocated\n", qid)) {
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans->txqs.txq[qid] = txq;
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
		      u8 tid, int size, unsigned int timeout)
{
	struct iwl_txq *txq;
	union {
		struct iwl_tx_queue_cfg_cmd old;
		struct iwl_scd_queue_cfg_cmd new;
	} cmd;
	struct iwl_host_cmd hcmd = {
		.flags = CMD_WANT_SKB,
	};
	int ret;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
	    trans->hw_rev_step == SILICON_A_STEP)
		size = 4096;

	txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
	if (IS_ERR(txq))
		return PTR_ERR(txq);

	if (trans->txqs.queue_alloc_cmd_ver == 0) {
		memset(&cmd.old, 0, sizeof(cmd.old));
		cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
		cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
		cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
		cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd.old.tid = tid;

		if (hweight32(sta_mask) != 1) {
			ret = -EINVAL;
			goto error;
		}
		cmd.old.sta_id = ffs(sta_mask) - 1;

		hcmd.id = SCD_QUEUE_CFG;
		hcmd.len[0] = sizeof(cmd.old);
		hcmd.data[0] = &cmd.old;
	} else if (trans->txqs.queue_alloc_cmd_ver == 3) {
		memset(&cmd.new, 0, sizeof(cmd.new));
		cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
		cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
		cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
		cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
		cmd.new.u.add.flags = cpu_to_le32(flags);
		cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
		cmd.new.u.add.tid = tid;

		hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
		hcmd.len[0] = sizeof(cmd.new);
		hcmd.data[0] = &cmd.new;
	} else {
		ret = -EOPNOTSUPP;
		goto error;
	}

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	return iwl_txq_alloc_response(trans, txq, &hcmd);

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
{
	if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
		 "queue %d out of range", queue))
		return;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_txq_gen2_free(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
{
	int i;

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
		if (!trans->txqs.txq[i])
			continue;

		iwl_txq_gen2_free(trans, i);
	}
}

int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
	struct iwl_txq *queue;
	int ret;

	/* alloc and init the tx queue */
	if (!trans->txqs.txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		if (!queue) {
			IWL_ERR(trans, "Not enough memory for tx queue\n");
			return -ENOMEM;
		}
		trans->txqs.txq[txq_id] = queue;
		ret = iwl_txq_alloc(trans, queue, queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		queue = trans->txqs.txq[txq_id];
	}

	ret = iwl_txq_init(trans, queue, queue_size,
			   (txq_id == trans->txqs.cmd.q_id));
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans->txqs.txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans->txqs.queue_used);

	return 0;

error:
	iwl_txq_gen2_tx_free(trans);
	return ret;
}

static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
						      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;
	dma_addr_t addr;
	dma_addr_t hi_len;

	if (trans->trans_cfg->gen2) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
	}

	tfd = _tfd;
	tb = &tfd->tbs[idx];
	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}
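
/*
 * Example of the gen1 36-bit address reconstruction above (made-up
 * values): with tb->lo = 0x12345678 and tb->hi_n_len = 0x0003, the low
 * 4 bits of hi_n_len carry address bits 32-35, so the returned DMA
 * address is 0x3_12345678; the remaining 12 bits of hi_n_len hold the
 * TB length and are masked off here.
 */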

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index)
{
	int i, num_tbs;
	void *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	if (trans->trans_cfg->gen2) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans->txqs.bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
			bc_ent;
}
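
/*
 * The duplicate write above exists because the scheduler byte-count
 * table is sized TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP entries:
 * mirroring the first TFD_QUEUE_SIZE_BC_DUP entries past the end of the
 * ring lets the hardware read byte counts across the ring wrap-around
 * without special-casing the boundary.
 */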

void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->txqs.cmd.q_id)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
			bc_ent;
}

/*
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	if (!txq->entries)
		return;

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

void iwl_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int tfd_num, read_ptr, last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
		return;

	if (WARN_ON(!txq))
		return;

	tfd_num = iwl_txq_get_cmd_index(txq, ssn);
	read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

	if (!iwl_txq_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->trans_cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);

		iwl_op_mode_time_point(trans->op_mode,
				       IWL_FW_INI_TIME_POINT_FAKE_TX,
				       NULL);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_txq_free_tso_page(trans, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->trans_cfg->gen2)
			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq);
	}

	iwl_txq_progress(txq);

	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in the reclaim path, which is
		 * non-re-entrant, so no one will try to access the txq
		 * data from that path. We stopped tx, so we can't
		 * have tx as well. Bottom line, we can unlock and re-lock
		 * later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->txqs.dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

/* Set wr_ptr of specific device and txq */
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze)
{
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = trans->txqs.txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	cmd_idx = trans->ops->send_cmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_trans_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
					&trans->status)) {
			IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
			dump_stack();
		}
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
			    struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3))) {
		IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
		return -EHOSTDOWN;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = trans->ops->send_cmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_trans_txq_send_hcmd_sync(trans, cmd);
}
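
/*
 * Minimal usage sketch for the helpers above (hypothetical command ID
 * and payload struct, shown for illustration only):
 *
 *	struct iwl_some_cmd payload = { ... };
 *	struct iwl_host_cmd hcmd = {
 *		.id = SOME_CMD_ID,
 *		.flags = CMD_WANT_SKB,
 *		.len = { sizeof(payload), },
 *		.data = { &payload, },
 *	};
 *	int ret = iwl_trans_txq_send_hcmd(trans, &hcmd);
 *
 * Without CMD_ASYNC the call is synchronous; with CMD_WANT_SKB the
 * response packet must be released with iwl_free_resp(&hcmd) once the
 * caller is done with it.
 */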