xref: /freebsd/sys/contrib/dev/iwlwifi/queue/tx.h (revision 9af1bba4)
1bfcc09ddSBjoern A. Zeeb /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2bfcc09ddSBjoern A. Zeeb /*
39af1bba4SBjoern A. Zeeb  * Copyright (C) 2020-2023 Intel Corporation
4bfcc09ddSBjoern A. Zeeb  */
5bfcc09ddSBjoern A. Zeeb #ifndef __iwl_trans_queue_tx_h__
6bfcc09ddSBjoern A. Zeeb #define __iwl_trans_queue_tx_h__
7bfcc09ddSBjoern A. Zeeb #include "iwl-fh.h"
8bfcc09ddSBjoern A. Zeeb #include "fw/api/tx.h"
9bfcc09ddSBjoern A. Zeeb 
/*
 * struct iwl_tso_hdr_page - page used when building TSO headers
 * @page: the backing page
 * @pos: current write position inside @page (next free byte)
 *
 * NOTE(review): purpose inferred from the name (TSO header scratch
 * space) — confirm against the users of get_page_hdr().
 */
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};
14bfcc09ddSBjoern A. Zeeb 
/*
 * iwl_txq_get_first_tb_dma - DMA address of entry @idx in the queue's
 * first-TB buffer array
 *
 * The array of struct iwl_pcie_first_tb_buf starts at txq->first_tb_dma;
 * this returns the bus address of the @idx-th element.
 */
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
21bfcc09ddSBjoern A. Zeeb 
iwl_txq_get_cmd_index(const struct iwl_txq * q,u32 index)22bfcc09ddSBjoern A. Zeeb static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
23bfcc09ddSBjoern A. Zeeb {
24bfcc09ddSBjoern A. Zeeb 	return index & (q->n_window - 1);
25bfcc09ddSBjoern A. Zeeb }
26bfcc09ddSBjoern A. Zeeb 
27bfcc09ddSBjoern A. Zeeb void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
28bfcc09ddSBjoern A. Zeeb 
iwl_wake_queue(struct iwl_trans * trans,struct iwl_txq * txq)29bfcc09ddSBjoern A. Zeeb static inline void iwl_wake_queue(struct iwl_trans *trans,
30bfcc09ddSBjoern A. Zeeb 				  struct iwl_txq *txq)
31bfcc09ddSBjoern A. Zeeb {
32bfcc09ddSBjoern A. Zeeb 	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
33bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
34bfcc09ddSBjoern A. Zeeb 		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
35bfcc09ddSBjoern A. Zeeb 	}
36bfcc09ddSBjoern A. Zeeb }
37bfcc09ddSBjoern A. Zeeb 
iwl_txq_get_tfd(struct iwl_trans * trans,struct iwl_txq * txq,int idx)38bfcc09ddSBjoern A. Zeeb static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
39bfcc09ddSBjoern A. Zeeb 				    struct iwl_txq *txq, int idx)
40bfcc09ddSBjoern A. Zeeb {
419af1bba4SBjoern A. Zeeb 	if (trans->trans_cfg->gen2)
42bfcc09ddSBjoern A. Zeeb 		idx = iwl_txq_get_cmd_index(txq, idx);
43bfcc09ddSBjoern A. Zeeb 
44bfcc09ddSBjoern A. Zeeb 	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
45bfcc09ddSBjoern A. Zeeb }
46bfcc09ddSBjoern A. Zeeb 
47bfcc09ddSBjoern A. Zeeb int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
48bfcc09ddSBjoern A. Zeeb 		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 *
 * Returns true if the buffer [phys, phys + len) straddles a 4 GiB
 * boundary, i.e. the upper 32 address bits differ between its first
 * and one-past-last byte.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
58bfcc09ddSBjoern A. Zeeb 
59bfcc09ddSBjoern A. Zeeb int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
60bfcc09ddSBjoern A. Zeeb 
iwl_txq_stop(struct iwl_trans * trans,struct iwl_txq * txq)61bfcc09ddSBjoern A. Zeeb static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
62bfcc09ddSBjoern A. Zeeb {
63bfcc09ddSBjoern A. Zeeb 	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
64bfcc09ddSBjoern A. Zeeb 		iwl_op_mode_queue_full(trans->op_mode, txq->id);
65bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
66bfcc09ddSBjoern A. Zeeb 	} else {
67bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
68bfcc09ddSBjoern A. Zeeb 				    txq->id);
69bfcc09ddSBjoern A. Zeeb 	}
70bfcc09ddSBjoern A. Zeeb }
71bfcc09ddSBjoern A. Zeeb 
72bfcc09ddSBjoern A. Zeeb /**
73bfcc09ddSBjoern A. Zeeb  * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
74bfcc09ddSBjoern A. Zeeb  * @index -- current index
75bfcc09ddSBjoern A. Zeeb  */
iwl_txq_inc_wrap(struct iwl_trans * trans,int index)76bfcc09ddSBjoern A. Zeeb static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
77bfcc09ddSBjoern A. Zeeb {
78bfcc09ddSBjoern A. Zeeb 	return ++index &
79bfcc09ddSBjoern A. Zeeb 		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
80bfcc09ddSBjoern A. Zeeb }
81bfcc09ddSBjoern A. Zeeb 
82bfcc09ddSBjoern A. Zeeb /**
83bfcc09ddSBjoern A. Zeeb  * iwl_txq_dec_wrap - decrement queue index, wrap back to end
84bfcc09ddSBjoern A. Zeeb  * @index -- current index
85bfcc09ddSBjoern A. Zeeb  */
iwl_txq_dec_wrap(struct iwl_trans * trans,int index)86bfcc09ddSBjoern A. Zeeb static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
87bfcc09ddSBjoern A. Zeeb {
88bfcc09ddSBjoern A. Zeeb 	return --index &
89bfcc09ddSBjoern A. Zeeb 		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
90bfcc09ddSBjoern A. Zeeb }
91bfcc09ddSBjoern A. Zeeb 
iwl_txq_used(const struct iwl_txq * q,int i)92bfcc09ddSBjoern A. Zeeb static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
93bfcc09ddSBjoern A. Zeeb {
94bfcc09ddSBjoern A. Zeeb 	int index = iwl_txq_get_cmd_index(q, i);
95bfcc09ddSBjoern A. Zeeb 	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
96bfcc09ddSBjoern A. Zeeb 	int w = iwl_txq_get_cmd_index(q, q->write_ptr);
97bfcc09ddSBjoern A. Zeeb 
98bfcc09ddSBjoern A. Zeeb 	return w >= r ?
99bfcc09ddSBjoern A. Zeeb 		(index >= r && index < w) :
100bfcc09ddSBjoern A. Zeeb 		!(index < r && index >= w);
101bfcc09ddSBjoern A. Zeeb }
102bfcc09ddSBjoern A. Zeeb 
103bfcc09ddSBjoern A. Zeeb void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
104bfcc09ddSBjoern A. Zeeb 
105bfcc09ddSBjoern A. Zeeb void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
106bfcc09ddSBjoern A. Zeeb 
107bfcc09ddSBjoern A. Zeeb int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
108bfcc09ddSBjoern A. Zeeb 			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
109bfcc09ddSBjoern A. Zeeb 			u16 len);
110bfcc09ddSBjoern A. Zeeb 
111bfcc09ddSBjoern A. Zeeb void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
112bfcc09ddSBjoern A. Zeeb 			    struct iwl_cmd_meta *meta,
113bfcc09ddSBjoern A. Zeeb 			    struct iwl_tfh_tfd *tfd);
114bfcc09ddSBjoern A. Zeeb 
115d9836fb4SBjoern A. Zeeb int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
116d9836fb4SBjoern A. Zeeb 		      u32 sta_mask, u8 tid,
117d9836fb4SBjoern A. Zeeb 		      int size, unsigned int timeout);
118bfcc09ddSBjoern A. Zeeb 
119bfcc09ddSBjoern A. Zeeb int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
120bfcc09ddSBjoern A. Zeeb 		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);
121bfcc09ddSBjoern A. Zeeb 
122bfcc09ddSBjoern A. Zeeb void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
123bfcc09ddSBjoern A. Zeeb void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
124bfcc09ddSBjoern A. Zeeb void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
125bfcc09ddSBjoern A. Zeeb void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
126bfcc09ddSBjoern A. Zeeb int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
127bfcc09ddSBjoern A. Zeeb 		 bool cmd_queue);
128bfcc09ddSBjoern A. Zeeb int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
129bfcc09ddSBjoern A. Zeeb #ifdef CONFIG_INET
130bfcc09ddSBjoern A. Zeeb struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
131bfcc09ddSBjoern A. Zeeb 				      struct sk_buff *skb);
132bfcc09ddSBjoern A. Zeeb #endif
iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans * trans,void * _tfd)133bfcc09ddSBjoern A. Zeeb static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
134bfcc09ddSBjoern A. Zeeb 					      void *_tfd)
135bfcc09ddSBjoern A. Zeeb {
136bfcc09ddSBjoern A. Zeeb 	struct iwl_tfd *tfd;
137bfcc09ddSBjoern A. Zeeb 
1389af1bba4SBjoern A. Zeeb 	if (trans->trans_cfg->gen2) {
139d9836fb4SBjoern A. Zeeb 		struct iwl_tfh_tfd *tfh_tfd = _tfd;
140bfcc09ddSBjoern A. Zeeb 
141d9836fb4SBjoern A. Zeeb 		return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
142bfcc09ddSBjoern A. Zeeb 	}
143bfcc09ddSBjoern A. Zeeb 
144bfcc09ddSBjoern A. Zeeb 	tfd = (struct iwl_tfd *)_tfd;
145bfcc09ddSBjoern A. Zeeb 	return tfd->num_tbs & 0x1f;
146bfcc09ddSBjoern A. Zeeb }
147bfcc09ddSBjoern A. Zeeb 
iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans * trans,void * _tfd,u8 idx)148bfcc09ddSBjoern A. Zeeb static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
149bfcc09ddSBjoern A. Zeeb 					      void *_tfd, u8 idx)
150bfcc09ddSBjoern A. Zeeb {
151bfcc09ddSBjoern A. Zeeb 	struct iwl_tfd *tfd;
152bfcc09ddSBjoern A. Zeeb 	struct iwl_tfd_tb *tb;
153bfcc09ddSBjoern A. Zeeb 
1549af1bba4SBjoern A. Zeeb 	if (trans->trans_cfg->gen2) {
155d9836fb4SBjoern A. Zeeb 		struct iwl_tfh_tfd *tfh_tfd = _tfd;
156d9836fb4SBjoern A. Zeeb 		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
157bfcc09ddSBjoern A. Zeeb 
158d9836fb4SBjoern A. Zeeb 		return le16_to_cpu(tfh_tb->tb_len);
159bfcc09ddSBjoern A. Zeeb 	}
160bfcc09ddSBjoern A. Zeeb 
161bfcc09ddSBjoern A. Zeeb 	tfd = (struct iwl_tfd *)_tfd;
162bfcc09ddSBjoern A. Zeeb 	tb = &tfd->tbs[idx];
163bfcc09ddSBjoern A. Zeeb 
164bfcc09ddSBjoern A. Zeeb 	return le16_to_cpu(tb->hi_n_len) >> 4;
165bfcc09ddSBjoern A. Zeeb }
166bfcc09ddSBjoern A. Zeeb 
167bfcc09ddSBjoern A. Zeeb void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
168bfcc09ddSBjoern A. Zeeb 			    struct iwl_cmd_meta *meta,
169bfcc09ddSBjoern A. Zeeb 			    struct iwl_txq *txq, int index);
170bfcc09ddSBjoern A. Zeeb void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
171bfcc09ddSBjoern A. Zeeb 				     struct iwl_txq *txq);
172bfcc09ddSBjoern A. Zeeb void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
173bfcc09ddSBjoern A. Zeeb 				      struct iwl_txq *txq, u16 byte_cnt,
174bfcc09ddSBjoern A. Zeeb 				      int num_tbs);
175bfcc09ddSBjoern A. Zeeb void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
176bfcc09ddSBjoern A. Zeeb 		     struct sk_buff_head *skbs);
177bfcc09ddSBjoern A. Zeeb void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
178bfcc09ddSBjoern A. Zeeb void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
179bfcc09ddSBjoern A. Zeeb 				bool freeze);
180bfcc09ddSBjoern A. Zeeb void iwl_txq_progress(struct iwl_txq *txq);
181bfcc09ddSBjoern A. Zeeb void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
182bfcc09ddSBjoern A. Zeeb int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
183bfcc09ddSBjoern A. Zeeb #endif /* __iwl_trans_queue_tx_h__ */
184