/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ		0
#define NICVF_INTR_ID_SQ		8
#define NICVF_INTR_ID_RBDR		16
#define NICVF_INTR_ID_MISC		18
#define NICVF_INTR_ID_QS_ERR		19

#define RBDR_SIZE0			0ULL /* 8K entries */
#define RBDR_SIZE1			1ULL /* 16K entries */
#define RBDR_SIZE2			2ULL /* 32K entries */
#define RBDR_SIZE3			3ULL /* 64K entries */
#define RBDR_SIZE4			4ULL /* 128K entries */
#define RBDR_SIZE5			5ULL /* 256K entries */
#define RBDR_SIZE6			6ULL /* 512K entries */

#define SND_QUEUE_SIZE0			0ULL /* 1K entries */
#define SND_QUEUE_SIZE1			1ULL /* 2K entries */
#define SND_QUEUE_SIZE2			2ULL /* 4K entries */
#define SND_QUEUE_SIZE3			3ULL /* 8K entries */
#define SND_QUEUE_SIZE4			4ULL /* 16K entries */
#define SND_QUEUE_SIZE5			5ULL /* 32K entries */
#define SND_QUEUE_SIZE6			6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0			0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1			1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2			2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3			3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4			4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5			5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6			6ULL /* 64K entries */

/* Default queue counts per QS, their lengths and threshold values */
#define RBDR_CNT			1
#define RCV_QUEUE_CNT			1
#define SND_QUEUE_CNT			1
#define CMP_QUEUE_CNT			1 /* Max of RCV and SND qcount */

#define SND_QSIZE			SND_QUEUE_SIZE0
#define SND_QUEUE_LEN			BIT_ULL((SND_QSIZE + 10))
#define SND_QUEUE_THRESH		2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
#define MAX_CQE_PER_PKT_XMIT		2

#define CMP_QSIZE			CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN			BIT_ULL((CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH		0
#define CMP_QUEUE_TIMER_THRESH		1 /* 1 ms */

#define RBDR_SIZE			RBDR_SIZE0
#define RCV_BUF_COUNT			BIT_ULL((RBDR_SIZE + 13))
#define RBDR_THRESH			(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN			2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN			DMA_BUFFER_LEN

#define MAX_CQES_FOR_TX			((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
					 MAX_CQE_PER_PKT_XMIT)
#define RQ_CQ_DROP			((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
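
/*
 * Minimal sanity-check sketch of the size-code arithmetic above: a send or
 * completion queue size code N encodes 1K << N entries, and an RBDR size code
 * N encodes 8K << N buffers, which is what SND_QUEUE_LEN, CMP_QUEUE_LEN and
 * RCV_BUF_COUNT expand to for the default codes.  Assumes BIT_ULL is already
 * visible to includers of this header, as the macros above require anyway.
 */
_Static_assert(SND_QUEUE_LEN == 1024, "SND_QUEUE_SIZE0 -> 1K SQ entries");
_Static_assert(CMP_QUEUE_LEN == 1024, "CMP_QUEUE_SIZE0 -> 1K CQ entries");
_Static_assert(RCV_BUF_COUNT == 8192, "RBDR_SIZE0 -> 8K receive buffers");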

/* Descriptor size */
#define SND_QUEUE_DESC_SIZE		16 /* 128 bits */
#define CMP_QUEUE_DESC_SIZE		512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	BIT_ULL(NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)

/* Queue enable/disable */
#define NICVF_SQ_EN			BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET			BIT_ULL(41)
#define NICVF_SQ_RESET			BIT_ULL(17)
#define NICVF_RBDR_RESET		BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct rx_stats {
		struct {
			u64 mac_errs;
			u64 l2_errs;
			u64 l3_errs;
			u64 l4_errs;
		} errlvl;
		struct {
			u64 good;
			u64 partial_pkts;
			u64 jabber_errs;
			u64 fcs_errs;
			u64 terminate_errs;
			u64 bgx_rx_errs;
			u64 prel2_errs;
			u64 l2_frags;
			u64 l2_overruns;
			u64 l2_pfcs;
			u64 l2_puny;
			u64 l2_hdr_malformed;
			u64 l2_oversize;
			u64 l2_undersize;
			u64 l2_len_mismatch;
			u64 l2_pclp;
			u64 non_ip;
			u64 ip_csum_err;
			u64 ip_hdr_malformed;
			u64 ip_payload_malformed;
			u64 ip_hop_errs;
			u64 l3_icrc_errs;
			u64 l3_pclp;
			u64 l4_malformed;
			u64 l4_csum_errs;
			u64 udp_len_err;
			u64 bad_l4_port;
			u64 bad_tcp_flag;
			u64 tcp_offset_errs;
			u64 l4_pclp;
			u64 pkt_truncated;
		} errop;
	} rx;
	struct tx_stats {
		u64 good;
		u64 desc_fault;
		u64 hdr_cons_err;
		u64 subdesc_err;
		u64 imm_size_oflow;
		u64 data_seq_err;
		u64 mem_seq_err;
		u64 lock_viol;
		u64 data_fault;
		u64 tstmp_conflict;
		u64 tstmp_timeout;
		u64 mem_fault;
		u64 csum_overlap;
		u64 csum_overflow;
	} tx;
};

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	u64 bytes;
	u64 pkts;
};

struct q_desc_mem {
	uintptr_t dma;
	u64 size;
	u16 q_len;
	uintptr_t phys_base;
	void *base;
	void *unalign_base;
	bool allocated;
};

struct rbdr {
	bool enable;
	u32 dma_size;
	u32 thresh;		/* Threshold level for interrupt */
	void *desc;
	u32 head;
	u32 tail;
	struct q_desc_mem dmem;
	uintptr_t buf_mem;
	uintptr_t buffers;
};

struct rcv_queue {
	bool enable;
	struct rbdr *rbdr_start;
	struct rbdr *rbdr_cont;
	bool en_tcp_reassembly;
	u8 cq_qs;		/* CQ's QS to which this RQ is assigned */
	u8 cq_idx;		/* CQ index (0 to 7) in the QS */
	u8 cont_rbdr_qs;	/* Continue buffer ptrs - QS num */
	u8 cont_qs_rbdr_idx;	/* RBDR idx in the cont QS */
	u8 start_rbdr_qs;	/* First buffer ptrs - QS num */
	u8 start_qs_rbdr_idx;	/* RBDR idx in the above QS */
	u8 caching;
	struct rx_tx_queue_stats stats;
};
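
/*
 * Illustrative sketch only: the descriptor rings described by q_desc_mem are
 * used as circular buffers, with head/tail style indices advanced modulo the
 * descriptor count kept in q_len.  The helper below is a hypothetical example
 * of that wrap-around, not part of the driver interface.
 */
static inline u16 nicvf_ring_next_idx(const struct q_desc_mem *dmem, u16 idx)
{
	/* Wrap back to descriptor 0 after the last entry */
	return (idx + 1) % dmem->q_len;
}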

struct cmp_queue {
	bool enable;
	u16 intr_timer_thresh;
	u16 thresh;
	void *desc;
	struct q_desc_mem dmem;
	struct cmp_queue_stats stats;
};

struct snd_queue {
	bool enable;
	u8 cq_qs;		/* CQ's QS to which this SQ is pointing */
	u8 cq_idx;		/* CQ index (0 to 7) in the above QS */
	u16 thresh;
	u32 free_cnt;
	u32 head;
	u32 tail;
	u64 *skbuff;
	void *desc;
	struct q_desc_mem dmem;
	struct rx_tx_queue_stats stats;
};

struct queue_set {
	bool enable;
	bool be_en;
	u8 vnic_id;
	u8 rq_cnt;
	u8 cq_cnt;
	u64 cq_len;
	u8 sq_cnt;
	u64 sq_len;
	u8 rbdr_cnt;
	u64 rbdr_len;
	struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
};

#define GET_RBDR_DESC(RING, idx) \
	(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx) \
	(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx) \
	(&(((union cq_desc_t *)((RING)->desc))[idx]))

/* CQ status bits */
#define CQ_WR_FULL	BIT(26)
#define CQ_WR_DISABLE	BIT(25)
#define CQ_WR_FAULT	BIT(24)
#define CQ_CQE_COUNT	(0xFFFF << 0)

#define CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct udevice *dev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_len);

void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len);
void nicvf_refill_rbdr(struct nicvf *nic);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, void *cq_desc);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, void *cq_desc);
#endif /* NICVF_QUEUES_H */
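
/*
 * Illustrative RX-path sketch for the declarations above (assumptions only,
 * not the driver's actual poll loop: "nic", "cq" and "cq_desc" are taken to
 * come from the caller's completion-queue processing, and a zero return from
 * nicvf_check_cqe_rx_errs() is assumed to mean "no error"):
 *
 *	size_t len;
 *	void *pkt;
 *
 *	if (!nicvf_check_cqe_rx_errs(nic, cq, cq_desc)) {
 *		pkt = nicvf_get_rcv_pkt(nic, cq_desc, &len);
 *		if (pkt)
 *			;	// hand pkt/len to the network stack here
 *	}
 *	nicvf_refill_rbdr(nic);	// replenish receive buffer descriptors
 */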