1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2020-2024 Intel Corporation
4 */
5 #include <net/tso.h>
6 #include <linux/tcp.h>
7
8 #include "iwl-debug.h"
9 #include "iwl-io.h"
10 #include "fw/api/commands.h"
11 #include "fw/api/tx.h"
12 #include "fw/api/datapath.h"
13 #include "fw/api/debug.h"
14 #include "queue/tx.h"
15 #include "iwl-fh.h"
16 #include "iwl-scd.h"
17 #include <linux/dmapool.h>
18
19 /*
20 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
21 */
22 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
23 struct iwl_txq *txq, u16 byte_cnt,
24 int num_tbs)
25 {
26 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
27 u8 filled_tfd_size, num_fetch_chunks;
28 u16 len = byte_cnt;
29 __le16 bc_ent;
30
31 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
32 return;
33
34 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
35 num_tbs * sizeof(struct iwl_tfh_tb);
36 /*
37 * filled_tfd_size contains the number of filled bytes in the TFD.
38 * Dividing it by 64 will give the number of chunks to fetch
39 * to SRAM: 0 for one chunk, 1 for 2, and so on.
40 * If, for example, TFD contains only 3 TBs then 32 bytes
41 * of the TFD are used, and only one chunk of 64 bytes should
42 * be fetched
43 */
44 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
45
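/*
 * Illustrative example (hypothetical numbers, not from the original
 * source): with the 3-TB case above, filled_tfd_size is 32 bytes, so
 * num_fetch_chunks = DIV_ROUND_UP(32, 64) - 1 = 0; on an AX210-family
 * device a 1200-byte frame would then be encoded below as
 * bc_ent = cpu_to_le16(1200 | (0 << 14)).
 */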
46 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
47 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
48
49 /* Starting from AX210, the HW expects bytes */
50 WARN_ON(trans->txqs.bc_table_dword);
51 WARN_ON(len > 0x3FFF);
52 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
53 scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
54 } else {
55 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
56
57 /* Before AX210, the HW expects DW */
58 WARN_ON(!trans->txqs.bc_table_dword);
59 len = DIV_ROUND_UP(len, 4);
60 WARN_ON(len > 0xFFF);
61 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
62 scd_bc_tbl->tfd_offset[idx] = bc_ent;
63 }
64 }
65
66 /*
67 * iwl_txq_inc_wr_ptr - Send new write index to hardware
68 */
69 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
70 {
71 lockdep_assert_held(&txq->lock);
72
73 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
74
75 /*
76 * if not in power-save mode, uCode will never sleep when we're
77 * trying to tx (during RFKILL, we're not trying to tx).
78 */
79 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
80 }
81
82 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
83 struct iwl_tfh_tfd *tfd)
84 {
85 return le16_to_cpu(tfd->num_tbs) & 0x1f;
86 }
87
88 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
89 dma_addr_t addr, u16 len)
90 {
91 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
92 struct iwl_tfh_tb *tb;
93
94 /* Only WARN here so we know about the issue, but we mess up our
95 * unmap path because not every place currently checks for errors
96 * returned from this function - it can only return an error if
97 * there's no more space, and so when we know there is enough we
98 * don't always check ...
99 */
100 WARN(iwl_txq_crosses_4g_boundary(addr, len),
101 "possible DMA problem with iova:0x%llx, len:%d\n",
102 (unsigned long long)addr, len);
103
104 if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
105 return -EINVAL;
106 tb = &tfd->tbs[idx];
107
108 /* Each TFD can point to a maximum of max_tbs Tx buffers */
109 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
110 IWL_ERR(trans, "Error can not send more than %d chunks\n",
111 trans->txqs.tfd.max_tbs);
112 return -EINVAL;
113 }
114
115 put_unaligned_le64(addr, &tb->addr);
116 tb->tb_len = cpu_to_le16(len);
117
118 tfd->num_tbs = cpu_to_le16(idx + 1);
119
120 return idx;
121 }
122
123 static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
124 struct iwl_tfh_tfd *tfd)
125 {
126 tfd->num_tbs = 0;
127
128 iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
129 trans->invalid_tx_cmd.size);
130 }
131
132 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
133 struct iwl_tfh_tfd *tfd)
134 {
135 int i, num_tbs;
136
137 /* Sanity check on number of chunks */
138 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
139
140 if (num_tbs > trans->txqs.tfd.max_tbs) {
141 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
142 return;
143 }
144
145 /* first TB is never freed - it's the bidirectional DMA data */
146 for (i = 1; i < num_tbs; i++) {
147 if (meta->tbs & BIT(i))
148 dma_unmap_page(trans->dev,
149 le64_to_cpu(tfd->tbs[i].addr),
150 le16_to_cpu(tfd->tbs[i].tb_len),
151 DMA_TO_DEVICE);
152 else
153 dma_unmap_single(trans->dev,
154 le64_to_cpu(tfd->tbs[i].addr),
155 le16_to_cpu(tfd->tbs[i].tb_len),
156 DMA_TO_DEVICE);
157 }
158
159 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
160 }
161
162 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
163 {
164 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
165 * idx is bounded by n_window
166 */
167 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
168 struct sk_buff *skb;
169
170 lockdep_assert_held(&txq->lock);
171
172 if (!txq->entries)
173 return;
174
175 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
176 iwl_txq_get_tfd(trans, txq, idx));
177
178 skb = txq->entries[idx].skb;
179
180 /* Can be called from irqs-disabled context
181 * If skb is not NULL, it means that the whole queue is being
182 * freed and that the queue is not empty - free the skb
183 */
184 if (skb) {
185 iwl_op_mode_free_skb(trans->op_mode, skb);
186 txq->entries[idx].skb = NULL;
187 }
188 }
189
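/*
 * Lifetime note (summary of behaviour elsewhere in this file): pages
 * allocated by get_workaround_page() are chained through the last
 * pointer-sized slot of each page and anchored in skb->cb at page_offs;
 * the whole chain is walked and freed by iwl_txq_free_tso_page() when
 * the frame is reclaimed or the queue is unmapped.
 */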
190 static struct page *get_workaround_page(struct iwl_trans *trans,
191 struct sk_buff *skb)
192 {
193 struct page **page_ptr;
194 struct page *ret;
195
196 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
197
198 ret = alloc_page(GFP_ATOMIC);
199 if (!ret)
200 return NULL;
201
202 /* set the chaining pointer to the previous page if there */
203 *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
204 *page_ptr = ret;
205
206 return ret;
207 }
208
209 /*
210 * Add a TB and if needed apply the FH HW bug workaround;
211 * meta != NULL indicates that it's a page mapping and we
212 * need to dma_unmap_page() and set the meta->tbs bit in
213 * this case.
214 */
215 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
216 struct sk_buff *skb,
217 struct iwl_tfh_tfd *tfd,
218 dma_addr_t phys, void *virt,
219 u16 len, struct iwl_cmd_meta *meta)
220 {
221 dma_addr_t oldphys = phys;
222 struct page *page;
223 int ret;
224
225 if (unlikely(dma_mapping_error(trans->dev, phys)))
226 return -ENOMEM;
227
228 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
229 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
230
231 if (ret < 0)
232 goto unmap;
233
234 if (meta)
235 meta->tbs |= BIT(ret);
236
237 ret = 0;
238 goto trace;
239 }
240
241 /*
242 * Work around a hardware bug. If (as expressed in the
243 * condition above) the TB ends on a 32-bit boundary,
244 * then the next TB may be accessed with the wrong
245 * address.
246 * To work around it, copy the data elsewhere and make
247 * a new mapping for it so the device will not fail.
248 */
249
250 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
251 ret = -ENOBUFS;
252 goto unmap;
253 }
254
255 page = get_workaround_page(trans, skb);
256 if (!page) {
257 ret = -ENOMEM;
258 goto unmap;
259 }
260
261 memcpy(page_address(page), virt, len);
262
263 phys = dma_map_single(trans->dev, page_address(page), len,
264 DMA_TO_DEVICE);
265 if (unlikely(dma_mapping_error(trans->dev, phys)))
266 return -ENOMEM;
267 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
268 if (ret < 0) {
269 /* unmap the new allocation as single */
270 oldphys = phys;
271 meta = NULL;
272 goto unmap;
273 }
274 IWL_DEBUG_TX(trans,
275 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
276 len, (unsigned long long)oldphys,
277 (unsigned long long)phys);
278
279 ret = 0;
280 unmap:
281 if (meta)
282 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
283 else
284 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
285 trace:
286 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
287
288 return ret;
289 }
290
291 #ifdef CONFIG_INET
292 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
293 struct sk_buff *skb)
294 {
295 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
296 struct page **page_ptr;
297
298 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
299
300 if (WARN_ON(*page_ptr))
301 return NULL;
302
303 if (!p->page)
304 goto alloc;
305
306 /*
307 * Check if there's enough room on this page
308 *
309 * Note that we put a page chaining pointer *last* in the
310 * page - we need it somewhere, and if it's there then we
311 * avoid DMA mapping the last bits of the page which may
312 * trigger the 32-bit boundary hardware bug.
313 *
314 * (see also get_workaround_page() in tx-gen2.c)
315 */
316 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
317 sizeof(void *))
318 goto out;
319
320 /* We don't have enough room on this page, get a new one. */
321 __free_page(p->page);
322
323 alloc:
324 p->page = alloc_page(GFP_ATOMIC);
325 if (!p->page)
326 return NULL;
327 p->pos = page_address(p->page);
328 /* set the chaining pointer to NULL */
329 *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
330 out:
331 *page_ptr = p->page;
332 get_page(p->page);
333 return p;
334 }
335 #endif
336
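/*
 * Rough sketch of the per-subframe layout built by the loop below
 * (derived from the code; byte counts are the usual 802.3/SNAP sizes):
 *
 *   [pad 0-3][DA 6][SA 6][len 2][SNAP + IP + TCP header][payload TBs ...]
 *
 * The headers of each subframe are written contiguously into the TSO
 * header page and mapped as one TB; the payload is then added in
 * MSS-sized (or smaller) chunks via iwl_txq_gen2_set_tb_with_wa().
 */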
337 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
338 struct sk_buff *skb,
339 struct iwl_tfh_tfd *tfd, int start_len,
340 u8 hdr_len,
341 struct iwl_device_tx_cmd *dev_cmd)
342 {
343 #ifdef CONFIG_INET
344 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
345 struct ieee80211_hdr *hdr = (void *)skb->data;
346 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
347 unsigned int mss = skb_shinfo(skb)->gso_size;
348 u16 length, amsdu_pad;
349 u8 *start_hdr;
350 struct iwl_tso_hdr_page *hdr_page;
351 struct tso_t tso;
352
353 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
354 &dev_cmd->hdr, start_len, 0);
355
356 ip_hdrlen = skb_network_header_len(skb);
357 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
358 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
359 amsdu_pad = 0;
360
361 /* total amount of header we may need for this A-MSDU */
362 hdr_room = DIV_ROUND_UP(total_len, mss) *
363 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
364
365 /* Our device supports 9 segments at most, it will fit in 1 page */
366 hdr_page = get_page_hdr(trans, hdr_room, skb);
367 if (!hdr_page)
368 return -ENOMEM;
369
370 start_hdr = hdr_page->pos;
371
372 /*
373 * Pull the ieee80211 header to be able to use TSO core,
374 * we will restore it for the tx_status flow.
375 */
376 skb_pull(skb, hdr_len);
377
378 /*
379 * Remove the length of all the headers that we don't actually
380 * have in the MPDU by themselves, but that we duplicate into
381 * all the different MSDUs inside the A-MSDU.
382 */
383 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
384
385 tso_start(skb, &tso);
386
387 while (total_len) {
388 /* this is the data left for this subframe */
389 unsigned int data_left = min_t(unsigned int, mss, total_len);
390 unsigned int tb_len;
391 dma_addr_t tb_phys;
392 u8 *subf_hdrs_start = hdr_page->pos;
393
394 total_len -= data_left;
395
396 memset(hdr_page->pos, 0, amsdu_pad);
397 hdr_page->pos += amsdu_pad;
398 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
399 data_left)) & 0x3;
400 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
401 hdr_page->pos += ETH_ALEN;
402 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
403 hdr_page->pos += ETH_ALEN;
404
405 length = snap_ip_tcp_hdrlen + data_left;
406 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
407 hdr_page->pos += sizeof(length);
408
409 /*
410 * This will copy the SNAP as well which will be considered
411 * as MAC header.
412 */
413 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
414
415 hdr_page->pos += snap_ip_tcp_hdrlen;
416
417 tb_len = hdr_page->pos - start_hdr;
418 tb_phys = dma_map_single(trans->dev, start_hdr,
419 tb_len, DMA_TO_DEVICE);
420 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
421 goto out_err;
422 /*
423 * No need for _with_wa, this is from the TSO page and
424 * we leave some space at the end of it so can't hit
425 * the buggy scenario.
426 */
427 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
428 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
429 tb_phys, tb_len);
430 /* add this subframe's headers' length to the tx_cmd */
431 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
432
433 /* prepare the start_hdr for the next subframe */
434 start_hdr = hdr_page->pos;
435
436 /* put the payload */
437 while (data_left) {
438 int ret;
439
440 tb_len = min_t(unsigned int, tso.size, data_left);
441 tb_phys = dma_map_single(trans->dev, tso.data,
442 tb_len, DMA_TO_DEVICE);
443 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
444 tb_phys, tso.data,
445 tb_len, NULL);
446 if (ret)
447 goto out_err;
448
449 data_left -= tb_len;
450 tso_build_data(skb, &tso, tb_len);
451 }
452 }
453
454 /* re-add the WiFi header */
455 skb_push(skb, hdr_len);
456
457 return 0;
458
459 out_err:
460 #endif
461 return -EINVAL;
462 }
463
464 static struct
465 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
466 struct iwl_txq *txq,
467 struct iwl_device_tx_cmd *dev_cmd,
468 struct sk_buff *skb,
469 struct iwl_cmd_meta *out_meta,
470 int hdr_len,
471 int tx_cmd_len)
472 {
473 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
474 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
475 dma_addr_t tb_phys;
476 int len;
477 void *tb1_addr;
478
479 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
480
481 /*
482 * No need for _with_wa, the first TB allocation is aligned up
483 * to a 64-byte boundary and thus can't be at the end or cross
484 * a page boundary (much less a 2^32 boundary).
485 */
486 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
487
488 /*
489 * The second TB (tb1) points to the remainder of the TX command
490 * and the 802.11 header - dword aligned size
491 * (This calculation modifies the TX command, so do it before the
492 * setup of the first TB)
493 */
494 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
495 IWL_FIRST_TB_SIZE;
496
497 /* do not align A-MSDU to dword as the subframe header aligns it */
498
499 /* map the data for TB1 */
500 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
501 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
502 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
503 goto out_err;
504 /*
505 * No need for _with_wa(), we ensure (via alignment) that the data
506 * here can never cross or end at a page boundary.
507 */
508 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
509
510 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
511 hdr_len, dev_cmd))
512 goto out_err;
513
514 /* building the A-MSDU might have changed this data, memcpy it now */
515 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
516 return tfd;
517
518 out_err:
519 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
520 return NULL;
521 }
522
523 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
524 struct sk_buff *skb,
525 struct iwl_tfh_tfd *tfd,
526 struct iwl_cmd_meta *out_meta)
527 {
528 int i;
529
530 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
531 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
532 dma_addr_t tb_phys;
533 unsigned int fragsz = skb_frag_size(frag);
534 int ret;
535
536 if (!fragsz)
537 continue;
538
539 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
540 fragsz, DMA_TO_DEVICE);
541 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
542 skb_frag_address(frag),
543 fragsz, out_meta);
544 if (ret)
545 return ret;
546 }
547
548 return 0;
549 }
550
551 static struct
552 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
553 struct iwl_txq *txq,
554 struct iwl_device_tx_cmd *dev_cmd,
555 struct sk_buff *skb,
556 struct iwl_cmd_meta *out_meta,
557 int hdr_len,
558 int tx_cmd_len,
559 bool pad)
560 {
561 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
562 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
563 dma_addr_t tb_phys;
564 int len, tb1_len, tb2_len;
565 void *tb1_addr;
566 struct sk_buff *frag;
567
568 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
569
570 /* The first TB points to bi-directional DMA data */
571 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
572
573 /*
574 * No need for _with_wa, the first TB allocation is aligned up
575 * to a 64-byte boundary and thus can't be at the end or cross
576 * a page boundary (much less a 2^32 boundary).
577 */
578 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
579
580 /*
581 * The second TB (tb1) points to the remainder of the TX command
582 * and the 802.11 header - dword aligned size
583 * (This calculation modifies the TX command, so do it before the
584 * setup of the first TB)
585 */
586 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
587 IWL_FIRST_TB_SIZE;
588
589 if (pad)
590 tb1_len = ALIGN(len, 4);
591 else
592 tb1_len = len;
593
594 /* map the data for TB1 */
595 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
596 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
597 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
598 goto out_err;
599 /*
600 * No need for _with_wa(), we ensure (via alignment) that the data
601 * here can never cross or end at a page boundary.
602 */
603 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
604 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
605 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
606
607 /* set up TFD's third entry to point to remainder of skb's head */
608 tb2_len = skb_headlen(skb) - hdr_len;
609
610 if (tb2_len > 0) {
611 int ret;
612
613 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
614 tb2_len, DMA_TO_DEVICE);
615 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
616 skb->data + hdr_len, tb2_len,
617 NULL);
618 if (ret)
619 goto out_err;
620 }
621
622 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
623 goto out_err;
624
625 skb_walk_frags(skb, frag) {
626 int ret;
627
628 tb_phys = dma_map_single(trans->dev, frag->data,
629 skb_headlen(frag), DMA_TO_DEVICE);
630 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
631 frag->data,
632 skb_headlen(frag), NULL);
633 if (ret)
634 goto out_err;
635 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
636 goto out_err;
637 }
638
639 return tfd;
640
641 out_err:
642 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
643 return NULL;
644 }
645
646 static
647 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
648 struct iwl_txq *txq,
649 struct iwl_device_tx_cmd *dev_cmd,
650 struct sk_buff *skb,
651 struct iwl_cmd_meta *out_meta)
652 {
653 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
654 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
655 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
656 int len, hdr_len;
657 bool amsdu;
658
659 /* There must be data left over for TB1 or this code must be changed */
660 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
661 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
662 offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
663 IWL_FIRST_TB_SIZE);
664 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
665 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
666 offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
667 IWL_FIRST_TB_SIZE);
668
669 memset(tfd, 0, sizeof(*tfd));
670
671 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
672 len = sizeof(struct iwl_tx_cmd_gen2);
673 else
674 len = sizeof(struct iwl_tx_cmd_gen3);
675
676 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
677 (*ieee80211_get_qos_ctl(hdr) &
678 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
679
680 hdr_len = ieee80211_hdrlen(hdr->frame_control);
681
682 /*
683 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
684 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
685 * built in the higher layers already.
686 */
687 if (amsdu && skb_shinfo(skb)->gso_size)
688 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
689 out_meta, hdr_len, len);
690 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
691 hdr_len, len, !amsdu);
692 }
693
694 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
695 {
696 unsigned int max;
697 unsigned int used;
698
699 /*
700 * To avoid ambiguity between empty and completely full queues, there
701 * should always be less than max_tfd_queue_size elements in the queue.
702 * If q->n_window is smaller than max_tfd_queue_size, there is no need
703 * to reserve any queue entries for this purpose.
704 */
705 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
706 max = q->n_window;
707 else
708 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
709
710 /*
711 * max_tfd_queue_size is a power of 2, so the following is equivalent to
712 * modulo by max_tfd_queue_size and is well defined.
713 */
714 used = (q->write_ptr - q->read_ptr) &
715 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
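/*
 * Illustrative example (hypothetical values): with
 * max_tfd_queue_size = 256, write_ptr = 10 and read_ptr = 250,
 * used = (10 - 250) & 255 = 16, i.e. the masking makes the subtraction
 * wrap correctly even though write_ptr is numerically smaller.
 */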
716
717 if (WARN_ON(used > max))
718 return 0;
719
720 return max - used;
721 }
722
723 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
724 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
725 {
726 struct iwl_cmd_meta *out_meta;
727 struct iwl_txq *txq = trans->txqs.txq[txq_id];
728 u16 cmd_len;
729 int idx;
730 void *tfd;
731
732 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
733 "queue %d out of range", txq_id))
734 return -EINVAL;
735
736 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
737 "TX on unused queue %d\n", txq_id))
738 return -EINVAL;
739
740 if (skb_is_nonlinear(skb) &&
741 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
742 __skb_linearize(skb))
743 return -ENOMEM;
744
745 spin_lock(&txq->lock);
746
747 if (iwl_txq_space(trans, txq) < txq->high_mark) {
748 iwl_txq_stop(trans, txq);
749
750 /* don't put the packet on the ring if there is no room */
751 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
752 struct iwl_device_tx_cmd **dev_cmd_ptr;
753
754 dev_cmd_ptr = (void *)((u8 *)skb->cb +
755 trans->txqs.dev_cmd_offs);
756
757 *dev_cmd_ptr = dev_cmd;
758 __skb_queue_tail(&txq->overflow_q, skb);
759 spin_unlock(&txq->lock);
760 return 0;
761 }
762 }
763
764 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
765
766 /* Set up driver data for this TFD */
767 txq->entries[idx].skb = skb;
768 txq->entries[idx].cmd = dev_cmd;
769
770 dev_cmd->hdr.sequence =
771 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
772 INDEX_TO_SEQ(idx)));
773
774 /* Set up first empty entry in queue's array of Tx/cmd buffers */
775 out_meta = &txq->entries[idx].meta;
776 out_meta->flags = 0;
777
778 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
779 if (!tfd) {
780 spin_unlock(&txq->lock);
781 return -1;
782 }
783
784 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
785 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
786 (void *)dev_cmd->payload;
787
788 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
789 } else {
790 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
791 (void *)dev_cmd->payload;
792
793 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
794 }
795
796 /* Set up entry for this TFD in Tx byte-count array */
797 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
798 iwl_txq_gen2_get_num_tbs(trans, tfd));
799
800 /* start timer if queue currently empty */
801 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
802 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
803
804 /* Tell device the write index *just past* this latest filled TFD */
805 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
806 iwl_txq_inc_wr_ptr(trans, txq);
807 /*
808 * At this point the frame is "transmitted" successfully
809 * and we will get a TX status notification eventually.
810 */
811 spin_unlock(&txq->lock);
812 return 0;
813 }
814
815 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
816
817 /*
818 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
819 */
820 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
821 {
822 struct iwl_txq *txq = trans->txqs.txq[txq_id];
823
824 spin_lock_bh(&txq->lock);
825 while (txq->write_ptr != txq->read_ptr) {
826 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
827 txq_id, txq->read_ptr);
828
829 if (txq_id != trans->txqs.cmd.q_id) {
830 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
831 struct sk_buff *skb = txq->entries[idx].skb;
832
833 if (!WARN_ON_ONCE(!skb))
834 iwl_txq_free_tso_page(trans, skb);
835 }
836 iwl_txq_gen2_free_tfd(trans, txq);
837 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
838 }
839
840 while (!skb_queue_empty(&txq->overflow_q)) {
841 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
842
843 iwl_op_mode_free_skb(trans->op_mode, skb);
844 }
845
846 spin_unlock_bh(&txq->lock);
847
848 /* just in case - this queue may have been stopped */
849 iwl_wake_queue(trans, txq);
850 }
851
852 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
853 struct iwl_txq *txq)
854 {
855 struct device *dev = trans->dev;
856
857 /* De-alloc circular buffer of TFDs */
858 if (txq->tfds) {
859 dma_free_coherent(dev,
860 trans->txqs.tfd.size * txq->n_window,
861 txq->tfds, txq->dma_addr);
862 dma_free_coherent(dev,
863 sizeof(*txq->first_tb_bufs) * txq->n_window,
864 txq->first_tb_bufs, txq->first_tb_dma);
865 }
866
867 kfree(txq->entries);
868 if (txq->bc_tbl.addr)
869 dma_pool_free(trans->txqs.bc_pool,
870 txq->bc_tbl.addr, txq->bc_tbl.dma);
871 kfree(txq);
872 }
873
874 /*
875 * iwl_txq_gen2_free - Deallocate DMA queue.
876 * @txq: Transmit queue to deallocate.
877 *
878 * Empty queue by removing and destroying all BD's.
879 * Free all buffers.
880 * 0-fill, but do not free "txq" descriptor structure.
881 */
882 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
883 {
884 struct iwl_txq *txq;
885 int i;
886
887 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
888 "queue %d out of range", txq_id))
889 return;
890
891 txq = trans->txqs.txq[txq_id];
892
893 if (WARN_ON(!txq))
894 return;
895
896 iwl_txq_gen2_unmap(trans, txq_id);
897
898 /* De-alloc array of command/tx buffers */
899 if (txq_id == trans->txqs.cmd.q_id)
900 for (i = 0; i < txq->n_window; i++) {
901 kfree_sensitive(txq->entries[i].cmd);
902 kfree_sensitive(txq->entries[i].free_buf);
903 }
904 del_timer_sync(&txq->stuck_timer);
905
906 iwl_txq_gen2_free_memory(trans, txq);
907
908 trans->txqs.txq[txq_id] = NULL;
909
910 clear_bit(txq_id, trans->txqs.queue_used);
911 }
912
913 /*
914 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
915 */
916 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
917 {
918 q->n_window = slots_num;
919
920 /* slots_num must be power-of-two size, otherwise
921 * iwl_txq_get_cmd_index is broken. */
922 if (WARN_ON(!is_power_of_2(slots_num)))
923 return -EINVAL;
924
925 q->low_mark = q->n_window / 4;
926 if (q->low_mark < 4)
927 q->low_mark = 4;
928
929 q->high_mark = q->n_window / 8;
930 if (q->high_mark < 2)
931 q->high_mark = 2;
932
933 q->write_ptr = 0;
934 q->read_ptr = 0;
935
936 return 0;
937 }
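/*
 * For example (illustrative only), slots_num = 64 yields low_mark = 16
 * and high_mark = 8: iwl_txq_gen2_tx() stops the queue once free space
 * drops below high_mark, and iwl_txq_reclaim() wakes it again once free
 * space rises above low_mark.
 */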
938
939 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
940 bool cmd_queue)
941 {
942 int ret;
943 u32 tfd_queue_max_size =
944 trans->trans_cfg->base_params->max_tfd_queue_size;
945
946 txq->need_update = false;
947
948 /* max_tfd_queue_size must be power-of-two size, otherwise
949 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
950 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
951 "Max tfd queue size must be a power of two, but is %d",
952 tfd_queue_max_size))
953 return -EINVAL;
954
955 /* Initialize queue's high/low-water marks, and head/tail indexes */
956 ret = iwl_queue_init(txq, slots_num);
957 if (ret)
958 return ret;
959
960 spin_lock_init(&txq->lock);
961
962 if (cmd_queue) {
963 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
964
965 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
966 }
967
968 __skb_queue_head_init(&txq->overflow_q);
969
970 return 0;
971 }
972
973 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
974 {
975 struct page **page_ptr;
976 struct page *next;
977
978 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
979 next = *page_ptr;
980 *page_ptr = NULL;
981
982 while (next) {
983 struct page *tmp = next;
984
985 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
986 sizeof(void *));
987 __free_page(tmp);
988 }
989 }
990
991 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
992 {
993 u32 txq_id = txq->id;
994 u32 status;
995 bool active;
996 u8 fifo;
997
998 if (trans->trans_cfg->gen2) {
999 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
1000 txq->read_ptr, txq->write_ptr);
1001 /* TODO: access new SCD registers and dump them */
1002 return;
1003 }
1004
1005 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
1006 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1007 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1008
1009 IWL_ERR(trans,
1010 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
1011 txq_id, active ? "" : "in", fifo,
1012 jiffies_to_msecs(txq->wd_timeout),
1013 txq->read_ptr, txq->write_ptr,
1014 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
1015 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1016 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1017 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1018 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1019 }
1020
1021 static void iwl_txq_stuck_timer(struct timer_list *t)
1022 {
1023 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1024 struct iwl_trans *trans = txq->trans;
1025
1026 spin_lock(&txq->lock);
1027 /* check if triggered erroneously */
1028 if (txq->read_ptr == txq->write_ptr) {
1029 spin_unlock(&txq->lock);
1030 return;
1031 }
1032 spin_unlock(&txq->lock);
1033
1034 iwl_txq_log_scd_error(trans, txq);
1035
1036 iwl_force_nmi(trans);
1037 }
1038
1039 static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
1040 struct iwl_tfd *tfd)
1041 {
1042 tfd->num_tbs = 0;
1043
1044 iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
1045 trans->invalid_tx_cmd.size);
1046 }
1047
1048 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1049 bool cmd_queue)
1050 {
1051 size_t num_entries = trans->trans_cfg->gen2 ?
1052 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
1053 size_t tfd_sz;
1054 size_t tb0_buf_sz;
1055 int i;
1056
1057 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
1058 return -EINVAL;
1059
1060 if (WARN_ON(txq->entries || txq->tfds))
1061 return -EINVAL;
1062
1063 tfd_sz = trans->txqs.tfd.size * num_entries;
1064
1065 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1066 txq->trans = trans;
1067
1068 txq->n_window = slots_num;
1069
1070 txq->entries = kcalloc(slots_num,
1071 sizeof(struct iwl_pcie_txq_entry),
1072 GFP_KERNEL);
1073
1074 if (!txq->entries)
1075 goto error;
1076
1077 if (cmd_queue)
1078 for (i = 0; i < slots_num; i++) {
1079 txq->entries[i].cmd =
1080 kmalloc(sizeof(struct iwl_device_cmd),
1081 GFP_KERNEL);
1082 if (!txq->entries[i].cmd)
1083 goto error;
1084 }
1085
1086 /* Circular buffer of transmit frame descriptors (TFDs),
1087 * shared with device */
1088 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1089 &txq->dma_addr, GFP_KERNEL);
1090 if (!txq->tfds)
1091 goto error;
1092
1093 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1094
1095 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1096
1097 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1098 &txq->first_tb_dma,
1099 GFP_KERNEL);
1100 if (!txq->first_tb_bufs)
1101 goto err_free_tfds;
1102
1103 for (i = 0; i < num_entries; i++) {
1104 void *tfd = iwl_txq_get_tfd(trans, txq, i);
1105
1106 if (trans->trans_cfg->gen2)
1107 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
1108 else
1109 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1110 }
1111
1112 return 0;
1113 err_free_tfds:
1114 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1115 txq->tfds = NULL;
1116 error:
1117 if (txq->entries && cmd_queue)
1118 for (i = 0; i < slots_num; i++)
1119 kfree(txq->entries[i].cmd);
1120 kfree(txq->entries);
1121 txq->entries = NULL;
1122
1123 return -ENOMEM;
1124 }
1125
1126 static struct iwl_txq *
1127 iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
1128 {
1129 size_t bc_tbl_size, bc_tbl_entries;
1130 struct iwl_txq *txq;
1131 int ret;
1132
1133 WARN_ON(!trans->txqs.bc_tbl_size);
1134
1135 bc_tbl_size = trans->txqs.bc_tbl_size;
1136 bc_tbl_entries = bc_tbl_size / sizeof(u16);
1137
1138 if (WARN_ON(size > bc_tbl_entries))
1139 return ERR_PTR(-EINVAL);
1140
1141 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1142 if (!txq)
1143 return ERR_PTR(-ENOMEM);
1144
1145 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1146 &txq->bc_tbl.dma);
1147 if (!txq->bc_tbl.addr) {
1148 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1149 kfree(txq);
1150 return ERR_PTR(-ENOMEM);
1151 }
1152
1153 ret = iwl_txq_alloc(trans, txq, size, false);
1154 if (ret) {
1155 IWL_ERR(trans, "Tx queue alloc failed\n");
1156 goto error;
1157 }
1158 ret = iwl_txq_init(trans, txq, size, false);
1159 if (ret) {
1160 IWL_ERR(trans, "Tx queue init failed\n");
1161 goto error;
1162 }
1163
1164 txq->wd_timeout = msecs_to_jiffies(timeout);
1165
1166 return txq;
1167
1168 error:
1169 iwl_txq_gen2_free_memory(trans, txq);
1170 return ERR_PTR(ret);
1171 }
1172
1173 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1174 struct iwl_host_cmd *hcmd)
1175 {
1176 struct iwl_tx_queue_cfg_rsp *rsp;
1177 int ret, qid;
1178 u32 wr_ptr;
1179
1180 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1181 sizeof(*rsp))) {
1182 ret = -EINVAL;
1183 goto error_free_resp;
1184 }
1185
1186 rsp = (void *)hcmd->resp_pkt->data;
1187 qid = le16_to_cpu(rsp->queue_number);
1188 wr_ptr = le16_to_cpu(rsp->write_pointer);
1189
1190 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1191 WARN_ONCE(1, "queue index %d unsupported", qid);
1192 ret = -EIO;
1193 goto error_free_resp;
1194 }
1195
1196 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1197 WARN_ONCE(1, "queue %d already used", qid);
1198 ret = -EIO;
1199 goto error_free_resp;
1200 }
1201
1202 if (WARN_ONCE(trans->txqs.txq[qid],
1203 "queue %d already allocated\n", qid)) {
1204 ret = -EIO;
1205 goto error_free_resp;
1206 }
1207
1208 txq->id = qid;
1209 trans->txqs.txq[qid] = txq;
1210 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1211
1212 /* Place first TFD at index corresponding to start sequence number */
1213 txq->read_ptr = wr_ptr;
1214 txq->write_ptr = wr_ptr;
1215
1216 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1217
1218 iwl_free_resp(hcmd);
1219 return qid;
1220
1221 error_free_resp:
1222 iwl_free_resp(hcmd);
1223 iwl_txq_gen2_free_memory(trans, txq);
1224 return ret;
1225 }
1226
1227 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1228 u8 tid, int size, unsigned int timeout)
1229 {
1230 struct iwl_txq *txq;
1231 union {
1232 struct iwl_tx_queue_cfg_cmd old;
1233 struct iwl_scd_queue_cfg_cmd new;
1234 } cmd;
1235 struct iwl_host_cmd hcmd = {
1236 .flags = CMD_WANT_SKB,
1237 };
1238 int ret;
1239
1240 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1241 trans->hw_rev_step == SILICON_A_STEP)
1242 size = 4096;
1243
1244 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1245 if (IS_ERR(txq))
1246 return PTR_ERR(txq);
1247
1248 if (trans->txqs.queue_alloc_cmd_ver == 0) {
1249 memset(&cmd.old, 0, sizeof(cmd.old));
1250 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1251 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1252 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1253 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
1254 cmd.old.tid = tid;
1255
1256 if (hweight32(sta_mask) != 1) {
1257 ret = -EINVAL;
1258 goto error;
1259 }
1260 cmd.old.sta_id = ffs(sta_mask) - 1;
1261
1262 hcmd.id = SCD_QUEUE_CFG;
1263 hcmd.len[0] = sizeof(cmd.old);
1264 hcmd.data[0] = &cmd.old;
1265 } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
1266 memset(&cmd.new, 0, sizeof(cmd.new));
1267 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1268 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1269 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1270 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1271 cmd.new.u.add.flags = cpu_to_le32(flags);
1272 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1273 cmd.new.u.add.tid = tid;
1274
1275 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1276 hcmd.len[0] = sizeof(cmd.new);
1277 hcmd.data[0] = &cmd.new;
1278 } else {
1279 ret = -EOPNOTSUPP;
1280 goto error;
1281 }
1282
1283 ret = iwl_trans_send_cmd(trans, &hcmd);
1284 if (ret)
1285 goto error;
1286
1287 return iwl_txq_alloc_response(trans, txq, &hcmd);
1288
1289 error:
1290 iwl_txq_gen2_free_memory(trans, txq);
1291 return ret;
1292 }
1293
1294 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1295 {
1296 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1297 "queue %d out of range", queue))
1298 return;
1299
1300 /*
1301 * Upon HW Rfkill - we stop the device, and then stop the queues
1302 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1303 * allow the op_mode to call txq_disable after it already called
1304 * stop_device.
1305 */
1306 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1307 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1308 "queue %d not used", queue);
1309 return;
1310 }
1311
1312 iwl_txq_gen2_free(trans, queue);
1313
1314 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1315 }
1316
1317 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1318 {
1319 int i;
1320
1321 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1322
1323 /* Free all TX queues */
1324 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1325 if (!trans->txqs.txq[i])
1326 continue;
1327
1328 iwl_txq_gen2_free(trans, i);
1329 }
1330 }
1331
1332 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1333 {
1334 struct iwl_txq *queue;
1335 int ret;
1336
1337 /* alloc and init the tx queue */
1338 if (!trans->txqs.txq[txq_id]) {
1339 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1340 if (!queue) {
1341 IWL_ERR(trans, "Not enough memory for tx queue\n");
1342 return -ENOMEM;
1343 }
1344 trans->txqs.txq[txq_id] = queue;
1345 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1346 if (ret) {
1347 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1348 goto error;
1349 }
1350 } else {
1351 queue = trans->txqs.txq[txq_id];
1352 }
1353
1354 ret = iwl_txq_init(trans, queue, queue_size,
1355 (txq_id == trans->txqs.cmd.q_id));
1356 if (ret) {
1357 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1358 goto error;
1359 }
1360 trans->txqs.txq[txq_id]->id = txq_id;
1361 set_bit(txq_id, trans->txqs.queue_used);
1362
1363 return 0;
1364
1365 error:
1366 iwl_txq_gen2_tx_free(trans);
1367 return ret;
1368 }
1369
1370 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1371 struct iwl_tfd *tfd, u8 idx)
1372 {
1373 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
1374 dma_addr_t addr;
1375 dma_addr_t hi_len;
1376
1377 addr = get_unaligned_le32(&tb->lo);
1378
1379 if (sizeof(dma_addr_t) <= sizeof(u32))
1380 return addr;
1381
1382 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1383
1384 /*
1385 * shift by 16 twice to avoid warnings on 32-bit
1386 * (where this code never runs anyway due to the
1387 * if statement above)
1388 */
1389 return addr | ((hi_len << 16) << 16);
1390 }
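/*
 * Layout sketch (example values are hypothetical): a gen1 TB stores a
 * 32-bit low address in tb->lo plus 4 extra address bits in the low
 * nibble of tb->hi_n_len, so lo = 0x12345678 with (hi_n_len & 0xF) = 0x3
 * decodes to the 36-bit DMA address 0x312345678 on 64-bit builds.
 */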
1391
1392 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1393 struct iwl_cmd_meta *meta,
1394 struct iwl_txq *txq, int index)
1395 {
1396 int i, num_tbs;
1397 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
1398
1399 /* Sanity check on number of chunks */
1400 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1401
1402 if (num_tbs > trans->txqs.tfd.max_tbs) {
1403 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1404 /* @todo issue fatal error, it is quite a serious situation */
1405 return;
1406 }
1407
1408 /* first TB is never freed - it's the bidirectional DMA data */
1409
1410 for (i = 1; i < num_tbs; i++) {
1411 if (meta->tbs & BIT(i))
1412 dma_unmap_page(trans->dev,
1413 iwl_txq_gen1_tfd_tb_get_addr(trans,
1414 tfd, i),
1415 iwl_txq_gen1_tfd_tb_get_len(trans,
1416 tfd, i),
1417 DMA_TO_DEVICE);
1418 else
1419 dma_unmap_single(trans->dev,
1420 iwl_txq_gen1_tfd_tb_get_addr(trans,
1421 tfd, i),
1422 iwl_txq_gen1_tfd_tb_get_len(trans,
1423 tfd, i),
1424 DMA_TO_DEVICE);
1425 }
1426
1427 meta->tbs = 0;
1428
1429 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1430 }
1431
1432 #define IWL_TX_CRC_SIZE 4
1433 #define IWL_TX_DELIMITER_SIZE 4
1434
1435 /*
1436 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1437 */
1438 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1439 struct iwl_txq *txq, u16 byte_cnt,
1440 int num_tbs)
1441 {
1442 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1443 int write_ptr = txq->write_ptr;
1444 int txq_id = txq->id;
1445 u8 sec_ctl = 0;
1446 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1447 __le16 bc_ent;
1448 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1449 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1450 u8 sta_id = tx_cmd->sta_id;
1451
1452 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1453
1454 sec_ctl = tx_cmd->sec_ctl;
1455
1456 switch (sec_ctl & TX_CMD_SEC_MSK) {
1457 case TX_CMD_SEC_CCM:
1458 len += IEEE80211_CCMP_MIC_LEN;
1459 break;
1460 case TX_CMD_SEC_TKIP:
1461 len += IEEE80211_TKIP_ICV_LEN;
1462 break;
1463 case TX_CMD_SEC_WEP:
1464 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1465 break;
1466 }
1467 if (trans->txqs.bc_table_dword)
1468 len = DIV_ROUND_UP(len, 4);
1469
1470 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1471 return;
1472
1473 bc_ent = cpu_to_le16(len | (sta_id << 12));
1474
1475 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1476
1477 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1478 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1479 bc_ent;
1480 }
1481
1482 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1483 struct iwl_txq *txq)
1484 {
1485 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1486 int txq_id = txq->id;
1487 int read_ptr = txq->read_ptr;
1488 u8 sta_id = 0;
1489 __le16 bc_ent;
1490 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1491 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1492
1493 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1494
1495 if (txq_id != trans->txqs.cmd.q_id)
1496 sta_id = tx_cmd->sta_id;
1497
1498 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1499
1500 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1501
1502 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1503 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1504 bc_ent;
1505 }
1506
1507 /*
1508 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1509 * @trans - transport private data
1510 * @txq - tx queue
1512 *
1513 * Does NOT advance any TFD circular buffer read/write indexes
1514 * Does NOT free the TFD itself (which is within circular buffer)
1515 */
1516 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1517 {
1518 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1519 * idx is bounded by n_window
1520 */
1521 int rd_ptr = txq->read_ptr;
1522 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1523 struct sk_buff *skb;
1524
1525 lockdep_assert_held(&txq->lock);
1526
1527 if (!txq->entries)
1528 return;
1529
1530 /* We have only q->n_window txq->entries, but we use
1531 * TFD_QUEUE_SIZE_MAX tfds
1532 */
1533 if (trans->trans_cfg->gen2)
1534 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
1535 iwl_txq_get_tfd(trans, txq, rd_ptr));
1536 else
1537 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
1538 txq, rd_ptr);
1539
1540 /* free SKB */
1541 skb = txq->entries[idx].skb;
1542
1543 /* Can be called from irqs-disabled context
1544 * If skb is not NULL, it means that the whole queue is being
1545 * freed and that the queue is not empty - free the skb
1546 */
1547 if (skb) {
1548 iwl_op_mode_free_skb(trans->op_mode, skb);
1549 txq->entries[idx].skb = NULL;
1550 }
1551 }
1552
1553 void iwl_txq_progress(struct iwl_txq *txq)
1554 {
1555 lockdep_assert_held(&txq->lock);
1556
1557 if (!txq->wd_timeout)
1558 return;
1559
1560 /*
1561 * station is asleep and we send data - that must
1562 * be uAPSD or PS-Poll. Don't rearm the timer.
1563 */
1564 if (txq->frozen)
1565 return;
1566
1567 /*
1568 * if empty delete timer, otherwise move timer forward
1569 * since we're making progress on this queue
1570 */
1571 if (txq->read_ptr == txq->write_ptr)
1572 del_timer(&txq->stuck_timer);
1573 else
1574 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1575 }
1576
1577 /* Frees buffers until index _not_ inclusive */
1578 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1579 struct sk_buff_head *skbs, bool is_flush)
1580 {
1581 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1582 int tfd_num, read_ptr, last_to_free;
1583
1584 /* This function is not meant to release cmd queue */
1585 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1586 return;
1587
1588 if (WARN_ON(!txq))
1589 return;
1590
1591 tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1592
1593 spin_lock_bh(&txq->lock);
1594 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1595
1596 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1597 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1598 txq_id, ssn);
1599 goto out;
1600 }
1601
1602 if (read_ptr == tfd_num)
1603 goto out;
1604
1605 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
1606 txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
1607
1608 /* Since we free until index _not_ inclusive, the one before index is
1609 * the last we will free. This one must be used */
1610 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1611
1612 if (!iwl_txq_used(txq, last_to_free)) {
1613 IWL_ERR(trans,
1614 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1615 __func__, txq_id, last_to_free,
1616 trans->trans_cfg->base_params->max_tfd_queue_size,
1617 txq->write_ptr, txq->read_ptr);
1618
1619 iwl_op_mode_time_point(trans->op_mode,
1620 IWL_FW_INI_TIME_POINT_FAKE_TX,
1621 NULL);
1622 goto out;
1623 }
1624
1625 if (WARN_ON(!skb_queue_empty(skbs)))
1626 goto out;
1627
1628 for (;
1629 read_ptr != tfd_num;
1630 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1631 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1632 struct sk_buff *skb = txq->entries[read_ptr].skb;
1633
1634 if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
1635 read_ptr, txq->read_ptr, txq_id))
1636 continue;
1637
1638 iwl_txq_free_tso_page(trans, skb);
1639
1640 __skb_queue_tail(skbs, skb);
1641
1642 txq->entries[read_ptr].skb = NULL;
1643
1644 if (!trans->trans_cfg->gen2)
1645 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1646
1647 iwl_txq_free_tfd(trans, txq);
1648 }
1649
1650 iwl_txq_progress(txq);
1651
1652 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1653 test_bit(txq_id, trans->txqs.queue_stopped)) {
1654 struct sk_buff_head overflow_skbs;
1655 struct sk_buff *skb;
1656
1657 __skb_queue_head_init(&overflow_skbs);
1658 skb_queue_splice_init(&txq->overflow_q,
1659 is_flush ? skbs : &overflow_skbs);
1660
1661 /*
1662 * We are going to transmit from the overflow queue.
1663 * Remember this state so that wait_for_txq_empty will know we
1664 * are adding more packets to the TFD queue. It cannot rely on
1665 * the state of &txq->overflow_q, as we just emptied it, but
1666 * haven't TXed the content yet.
1667 */
1668 txq->overflow_tx = true;
1669
1670 /*
1671 * This is tricky: we are in the reclaim path, which is not
1672 * re-entrant, so no one else will try to access the txq
1673 * data from that path. We stopped tx, so we can't have tx
1674 * going on either. Bottom line, we can unlock and re-lock
1675 * later.
1676 */
1677 spin_unlock_bh(&txq->lock);
1678
1679 while ((skb = __skb_dequeue(&overflow_skbs))) {
1680 struct iwl_device_tx_cmd *dev_cmd_ptr;
1681
1682 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1683 trans->txqs.dev_cmd_offs);
1684
1685 /*
1686 * Note that we can very well be overflowing again.
1687 * In that case, iwl_txq_space will be small again
1688 * and we won't wake mac80211's queue.
1689 */
1690 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1691 }
1692
1693 if (iwl_txq_space(trans, txq) > txq->low_mark)
1694 iwl_wake_queue(trans, txq);
1695
1696 spin_lock_bh(&txq->lock);
1697 txq->overflow_tx = false;
1698 }
1699
1700 out:
1701 spin_unlock_bh(&txq->lock);
1702 }
1703
1704 /* Set wr_ptr of specific device and txq */
1705 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1706 {
1707 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1708
1709 spin_lock_bh(&txq->lock);
1710
1711 txq->write_ptr = ptr;
1712 txq->read_ptr = txq->write_ptr;
1713
1714 spin_unlock_bh(&txq->lock);
1715 }
1716
1717 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1718 bool freeze)
1719 {
1720 int queue;
1721
1722 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1723 struct iwl_txq *txq = trans->txqs.txq[queue];
1724 unsigned long now;
1725
1726 spin_lock_bh(&txq->lock);
1727
1728 now = jiffies;
1729
1730 if (txq->frozen == freeze)
1731 goto next_queue;
1732
1733 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1734 freeze ? "Freezing" : "Waking", queue);
1735
1736 txq->frozen = freeze;
1737
1738 if (txq->read_ptr == txq->write_ptr)
1739 goto next_queue;
1740
1741 if (freeze) {
1742 if (unlikely(time_after(now,
1743 txq->stuck_timer.expires))) {
1744 /*
1745 * The timer should have fired, maybe it is
1746 * spinning right now on the lock.
1747 */
1748 goto next_queue;
1749 }
1750 /* remember how long until the timer fires */
1751 txq->frozen_expiry_remainder =
1752 txq->stuck_timer.expires - now;
1753 del_timer(&txq->stuck_timer);
1754 goto next_queue;
1755 }
1756
1757 /*
1758 * Wake a non-empty queue -> arm timer with the
1759 * remainder before it froze
1760 */
1761 mod_timer(&txq->stuck_timer,
1762 now + txq->frozen_expiry_remainder);
1763
1764 next_queue:
1765 spin_unlock_bh(&txq->lock);
1766 }
1767 }
1768
1769 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
1770
1771 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1772 struct iwl_host_cmd *cmd)
1773 {
1774 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1775 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1776 int cmd_idx;
1777 int ret;
1778
1779 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1780
1781 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1782 &trans->status),
1783 "Command %s: a command is already active!\n", cmd_str))
1784 return -EIO;
1785
1786 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1787
1788 cmd_idx = trans->ops->send_cmd(trans, cmd);
1789 if (cmd_idx < 0) {
1790 ret = cmd_idx;
1791 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1792 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1793 cmd_str, ret);
1794 return ret;
1795 }
1796
1797 ret = wait_event_timeout(trans->wait_command_queue,
1798 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1799 &trans->status),
1800 HOST_COMPLETE_TIMEOUT);
1801 if (!ret) {
1802 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1803 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1804
1805 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1806 txq->read_ptr, txq->write_ptr);
1807
1808 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1809 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1810 cmd_str);
1811 ret = -ETIMEDOUT;
1812
1813 iwl_trans_sync_nmi(trans);
1814 goto cancel;
1815 }
1816
1817 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1818 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
1819 &trans->status)) {
1820 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1821 dump_stack();
1822 }
1823 ret = -EIO;
1824 goto cancel;
1825 }
1826
1827 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1828 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1829 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1830 ret = -ERFKILL;
1831 goto cancel;
1832 }
1833
1834 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1835 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1836 ret = -EIO;
1837 goto cancel;
1838 }
1839
1840 return 0;
1841
1842 cancel:
1843 if (cmd->flags & CMD_WANT_SKB) {
1844 /*
1845 * Cancel the CMD_WANT_SKB flag for the cmd in the
1846 * TX cmd queue. Otherwise in case the cmd comes
1847 * in later, it will possibly set an invalid
1848 * address (cmd->meta.source).
1849 */
1850 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1851 }
1852
1853 if (cmd->resp_pkt) {
1854 iwl_free_resp(cmd);
1855 cmd->resp_pkt = NULL;
1856 }
1857
1858 return ret;
1859 }
1860
1861 int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1862 struct iwl_host_cmd *cmd)
1863 {
1864 /* Make sure the NIC is still alive in the bus */
1865 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1866 return -ENODEV;
1867
1868 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1869 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1870 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1871 cmd->id);
1872 return -ERFKILL;
1873 }
1874
1875 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1876 !(cmd->flags & CMD_SEND_IN_D3))) {
1877 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1878 return -EHOSTDOWN;
1879 }
1880
1881 if (cmd->flags & CMD_ASYNC) {
1882 int ret;
1883
1884 /* An asynchronous command can not expect an SKB to be set. */
1885 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1886 return -EINVAL;
1887
1888 ret = trans->ops->send_cmd(trans, cmd);
1889 if (ret < 0) {
1890 IWL_ERR(trans,
1891 "Error sending %s: enqueue_hcmd failed: %d\n",
1892 iwl_get_cmd_string(trans, cmd->id), ret);
1893 return ret;
1894 }
1895 return 0;
1896 }
1897
1898 return iwl_trans_txq_send_hcmd_sync(trans, cmd);
1899 }
1900
1901