/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

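/* page used to build TSO headers; @pos is the next free byte in @page */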
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

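/* DMA address of the iwl_pcie_first_tb_buf for slot @idx in @txq */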
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

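/* map a queue index to a slot in the (power-of-2 sized) command window */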
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

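/* restart a stopped queue and tell the op_mode it may submit frames again */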
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

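/* return a pointer to the TFD at @idx; gen2 devices index within the window */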
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->gen2)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32 bits - since the
 * hardware is always 64 bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition to be done in 64 bits.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

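/* mark @txq as stopped and tell the op_mode the queue is full */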
static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

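/* true if slot @i lies between the read and write pointers, i.e. is in use */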
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
		      u32 sta_mask, u8 tid,
		      int size, unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
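/* number of TBs used in a gen1 TFD (low 5 bits of num_tbs) */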
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

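/* length of TB @idx; handles both the gen1 and gen2 (TFH) TFD layouts */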
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->gen2) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

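/*
 * Fill TB @idx of a gen1 TFD: the low 32 address bits go in @lo, the top
 * 4 bits are packed with the 12-bit length into @hi_n_len, and num_tbs
 * is set to @idx + 1.
 */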
static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans,
					    struct iwl_tfd *tfd,
					    u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs, bool is_flush);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_queue_tx_h__ */