/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This header file defines the work queue entry (wqe) data structure.
 * Since this is a commonly used structure that depends on structures
 * from several hardware blocks, those definitions have been placed
 * in this file to create a single point of definition of the wqe
 * format.
 * Data structures are still named according to the block that they
 * relate to.
 */

#ifndef __CVMX_WQE_H__
#define __CVMX_WQE_H__

#include "cvmx-packet.h"
#include "cvmx-csr-enums.h"
#include "cvmx-pki-defs.h"
#include "cvmx-pip-defs.h"
#include "octeon-feature.h"

#define OCT_TAG_TYPE_STRING(x)						\
	(((x) == CVMX_POW_TAG_TYPE_ORDERED) ?				\
	 "ORDERED" :							\
	 (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ?				\
	  "ATOMIC" :							\
	  (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : "NULL_NULL")))
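
/*
 * Usage sketch (illustrative only): pretty-print the tag of a freshly
 * received work entry. cvmx_pow_work_request_sync() is assumed to be
 * available from cvmx-pow.h; the accessors are defined later in this
 * file.
 *
 *	cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
 *
 *	if (work)
 *		printf("tag=0x%x type=%s\n", cvmx_wqe_get_tag(work),
 *		       OCT_TAG_TYPE_STRING(cvmx_wqe_get_tt(work)));
 */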

/* Error levels in WQE WORD2 (ERRLEV).*/
#define PKI_ERRLEV_E__RE_M 0x0
#define PKI_ERRLEV_E__LA_M 0x1
#define PKI_ERRLEV_E__LB_M 0x2
#define PKI_ERRLEV_E__LC_M 0x3
#define PKI_ERRLEV_E__LD_M 0x4
#define PKI_ERRLEV_E__LE_M 0x5
#define PKI_ERRLEV_E__LF_M 0x6
#define PKI_ERRLEV_E__LG_M 0x7

enum cvmx_pki_errlevel {
	CVMX_PKI_ERRLEV_E_RE = PKI_ERRLEV_E__RE_M,
	CVMX_PKI_ERRLEV_E_LA = PKI_ERRLEV_E__LA_M,
	CVMX_PKI_ERRLEV_E_LB = PKI_ERRLEV_E__LB_M,
	CVMX_PKI_ERRLEV_E_LC = PKI_ERRLEV_E__LC_M,
	CVMX_PKI_ERRLEV_E_LD = PKI_ERRLEV_E__LD_M,
	CVMX_PKI_ERRLEV_E_LE = PKI_ERRLEV_E__LE_M,
	CVMX_PKI_ERRLEV_E_LF = PKI_ERRLEV_E__LF_M,
	CVMX_PKI_ERRLEV_E_LG = PKI_ERRLEV_E__LG_M
};

#define CVMX_PKI_ERRLEV_MAX BIT(3) /* The size of WORD2:ERRLEV field.*/

/* Error code in WQE WORD2 (OPCODE).*/
#define CVMX_PKI_OPCODE_RE_NONE	      0x0
#define CVMX_PKI_OPCODE_RE_PARTIAL    0x1
#define CVMX_PKI_OPCODE_RE_JABBER     0x2
#define CVMX_PKI_OPCODE_RE_FCS	      0x7
#define CVMX_PKI_OPCODE_RE_FCS_RCV    0x8
#define CVMX_PKI_OPCODE_RE_TERMINATE  0x9
#define CVMX_PKI_OPCODE_RE_RX_CTL     0xb
#define CVMX_PKI_OPCODE_RE_SKIP	      0xc
#define CVMX_PKI_OPCODE_RE_DMAPKT     0xf
#define CVMX_PKI_OPCODE_RE_PKIPAR     0x13
#define CVMX_PKI_OPCODE_RE_PKIPCAM    0x14
#define CVMX_PKI_OPCODE_RE_MEMOUT     0x15
#define CVMX_PKI_OPCODE_RE_BUFS_OFLOW 0x16
#define CVMX_PKI_OPCODE_L2_FRAGMENT   0x20
#define CVMX_PKI_OPCODE_L2_OVERRUN    0x21
#define CVMX_PKI_OPCODE_L2_PFCS	      0x22
#define CVMX_PKI_OPCODE_L2_PUNY	      0x23
#define CVMX_PKI_OPCODE_L2_MAL	      0x24
#define CVMX_PKI_OPCODE_L2_OVERSIZE   0x25
#define CVMX_PKI_OPCODE_L2_UNDERSIZE  0x26
#define CVMX_PKI_OPCODE_L2_LENMISM    0x27
#define CVMX_PKI_OPCODE_IP_NOT	      0x41
#define CVMX_PKI_OPCODE_IP_CHK	      0x42
#define CVMX_PKI_OPCODE_IP_MAL	      0x43
#define CVMX_PKI_OPCODE_IP_MALD	      0x44
#define CVMX_PKI_OPCODE_IP_HOP	      0x45
#define CVMX_PKI_OPCODE_L4_MAL	      0x61
#define CVMX_PKI_OPCODE_L4_CHK	      0x62
#define CVMX_PKI_OPCODE_L4_LEN	      0x63
#define CVMX_PKI_OPCODE_L4_PORT	      0x64
#define CVMX_PKI_OPCODE_TCP_FLAG      0x65

#define CVMX_PKI_OPCODE_MAX BIT(8) /* The size of WORD2:OPCODE field.*/

/* Layer types in pki */
#define CVMX_PKI_LTYPE_E_NONE_M	      0x0
#define CVMX_PKI_LTYPE_E_ENET_M	      0x1
#define CVMX_PKI_LTYPE_E_VLAN_M	      0x2
#define CVMX_PKI_LTYPE_E_SNAP_PAYLD_M 0x5
#define CVMX_PKI_LTYPE_E_ARP_M	      0x6
#define CVMX_PKI_LTYPE_E_RARP_M	      0x7
#define CVMX_PKI_LTYPE_E_IP4_M	      0x8
#define CVMX_PKI_LTYPE_E_IP4_OPT_M    0x9
#define CVMX_PKI_LTYPE_E_IP6_M	      0xA
#define CVMX_PKI_LTYPE_E_IP6_OPT_M    0xB
#define CVMX_PKI_LTYPE_E_IPSEC_ESP_M  0xC
#define CVMX_PKI_LTYPE_E_IPFRAG_M     0xD
#define CVMX_PKI_LTYPE_E_IPCOMP_M     0xE
#define CVMX_PKI_LTYPE_E_TCP_M	      0x10
#define CVMX_PKI_LTYPE_E_UDP_M	      0x11
#define CVMX_PKI_LTYPE_E_SCTP_M	      0x12
#define CVMX_PKI_LTYPE_E_UDP_VXLAN_M  0x13
#define CVMX_PKI_LTYPE_E_GRE_M	      0x14
#define CVMX_PKI_LTYPE_E_NVGRE_M      0x15
#define CVMX_PKI_LTYPE_E_GTP_M	      0x16
#define CVMX_PKI_LTYPE_E_SW28_M	      0x1C
#define CVMX_PKI_LTYPE_E_SW29_M	      0x1D
#define CVMX_PKI_LTYPE_E_SW30_M	      0x1E
#define CVMX_PKI_LTYPE_E_SW31_M	      0x1F

enum cvmx_pki_layer_type {
	CVMX_PKI_LTYPE_E_NONE = CVMX_PKI_LTYPE_E_NONE_M,
	CVMX_PKI_LTYPE_E_ENET = CVMX_PKI_LTYPE_E_ENET_M,
	CVMX_PKI_LTYPE_E_VLAN = CVMX_PKI_LTYPE_E_VLAN_M,
	CVMX_PKI_LTYPE_E_SNAP_PAYLD = CVMX_PKI_LTYPE_E_SNAP_PAYLD_M,
	CVMX_PKI_LTYPE_E_ARP = CVMX_PKI_LTYPE_E_ARP_M,
	CVMX_PKI_LTYPE_E_RARP = CVMX_PKI_LTYPE_E_RARP_M,
	CVMX_PKI_LTYPE_E_IP4 = CVMX_PKI_LTYPE_E_IP4_M,
	CVMX_PKI_LTYPE_E_IP4_OPT = CVMX_PKI_LTYPE_E_IP4_OPT_M,
	CVMX_PKI_LTYPE_E_IP6 = CVMX_PKI_LTYPE_E_IP6_M,
	CVMX_PKI_LTYPE_E_IP6_OPT = CVMX_PKI_LTYPE_E_IP6_OPT_M,
	CVMX_PKI_LTYPE_E_IPSEC_ESP = CVMX_PKI_LTYPE_E_IPSEC_ESP_M,
	CVMX_PKI_LTYPE_E_IPFRAG = CVMX_PKI_LTYPE_E_IPFRAG_M,
	CVMX_PKI_LTYPE_E_IPCOMP = CVMX_PKI_LTYPE_E_IPCOMP_M,
	CVMX_PKI_LTYPE_E_TCP = CVMX_PKI_LTYPE_E_TCP_M,
	CVMX_PKI_LTYPE_E_UDP = CVMX_PKI_LTYPE_E_UDP_M,
	CVMX_PKI_LTYPE_E_SCTP = CVMX_PKI_LTYPE_E_SCTP_M,
	CVMX_PKI_LTYPE_E_UDP_VXLAN = CVMX_PKI_LTYPE_E_UDP_VXLAN_M,
	CVMX_PKI_LTYPE_E_GRE = CVMX_PKI_LTYPE_E_GRE_M,
	CVMX_PKI_LTYPE_E_NVGRE = CVMX_PKI_LTYPE_E_NVGRE_M,
	CVMX_PKI_LTYPE_E_GTP = CVMX_PKI_LTYPE_E_GTP_M,
	CVMX_PKI_LTYPE_E_SW28 = CVMX_PKI_LTYPE_E_SW28_M,
	CVMX_PKI_LTYPE_E_SW29 = CVMX_PKI_LTYPE_E_SW29_M,
	CVMX_PKI_LTYPE_E_SW30 = CVMX_PKI_LTYPE_E_SW30_M,
	CVMX_PKI_LTYPE_E_SW31 = CVMX_PKI_LTYPE_E_SW31_M,
	CVMX_PKI_LTYPE_E_MAX = CVMX_PKI_LTYPE_E_SW31
};

typedef union {
	u64 u64;
	struct {
		u64 ptr_vlan : 8;
		u64 ptr_layer_g : 8;
		u64 ptr_layer_f : 8;
		u64 ptr_layer_e : 8;
		u64 ptr_layer_d : 8;
		u64 ptr_layer_c : 8;
		u64 ptr_layer_b : 8;
		u64 ptr_layer_a : 8;
	};
} cvmx_pki_wqe_word4_t;

/**
 * HW decode / err_code in work queue entry
 */
typedef union {
	u64 u64;
	struct {
		u64 bufs : 8;
		u64 ip_offset : 8;
		u64 vlan_valid : 1;
		u64 vlan_stacked : 1;
		u64 unassigned : 1;
		u64 vlan_cfi : 1;
		u64 vlan_id : 12;
		u64 varies : 12;
		u64 dec_ipcomp : 1;
		u64 tcp_or_udp : 1;
		u64 dec_ipsec : 1;
		u64 is_v6 : 1;
		u64 software : 1;
		u64 L4_error : 1;
		u64 is_frag : 1;
		u64 IP_exc : 1;
		u64 is_bcast : 1;
		u64 is_mcast : 1;
		u64 not_IP : 1;
		u64 rcv_error : 1;
		u64 err_code : 8;
	} s;
	struct {
		u64 bufs : 8;
		u64 ip_offset : 8;
		u64 vlan_valid : 1;
		u64 vlan_stacked : 1;
		u64 unassigned : 1;
		u64 vlan_cfi : 1;
		u64 vlan_id : 12;
		u64 port : 12;
		u64 dec_ipcomp : 1;
		u64 tcp_or_udp : 1;
		u64 dec_ipsec : 1;
		u64 is_v6 : 1;
		u64 software : 1;
		u64 L4_error : 1;
		u64 is_frag : 1;
		u64 IP_exc : 1;
		u64 is_bcast : 1;
		u64 is_mcast : 1;
		u64 not_IP : 1;
		u64 rcv_error : 1;
		u64 err_code : 8;
	} s_cn68xx;
	struct {
		u64 bufs : 8;
		u64 ip_offset : 8;
		u64 vlan_valid : 1;
		u64 vlan_stacked : 1;
		u64 unassigned : 1;
		u64 vlan_cfi : 1;
		u64 vlan_id : 12;
		u64 pr : 4;
		u64 unassigned2a : 4;
		u64 unassigned2 : 4;
		u64 dec_ipcomp : 1;
		u64 tcp_or_udp : 1;
		u64 dec_ipsec : 1;
		u64 is_v6 : 1;
		u64 software : 1;
		u64 L4_error : 1;
		u64 is_frag : 1;
		u64 IP_exc : 1;
		u64 is_bcast : 1;
		u64 is_mcast : 1;
		u64 not_IP : 1;
		u64 rcv_error : 1;
		u64 err_code : 8;
	} s_cn38xx;
	struct {
		u64 unused1 : 16;
		u64 vlan : 16;
		u64 unused2 : 32;
	} svlan;
	struct {
		u64 bufs : 8;
		u64 unused : 8;
		u64 vlan_valid : 1;
		u64 vlan_stacked : 1;
		u64 unassigned : 1;
		u64 vlan_cfi : 1;
		u64 vlan_id : 12;
		u64 varies : 12;
		u64 unassigned2 : 4;
		u64 software : 1;
		u64 unassigned3 : 1;
		u64 is_rarp : 1;
		u64 is_arp : 1;
		u64 is_bcast : 1;
		u64 is_mcast : 1;
		u64 not_IP : 1;
		u64 rcv_error : 1;
		u64 err_code : 8;
	} snoip;
	struct {
		u64 bufs : 8;
		u64 unused : 8;
		u64 vlan_valid : 1;
		u64 vlan_stacked : 1;
		u64 unassigned : 1;
		u64 vlan_cfi : 1;
		u64 vlan_id : 12;
		u64 port : 12;
		u64 unassigned2 : 4;
		u64 software : 1;
		u64 unassigned3 : 1;
		u64 is_rarp : 1;
		u64 is_arp : 1;
		u64 is_bcast : 1;
		u64 is_mcast : 1;
		u64 not_IP : 1;
		u64 rcv_error : 1;
		u64 err_code : 8;
	} snoip_cn68xx;
	struct {
		u64 bufs : 8;
		u64 unused : 8;
		u64 vlan_valid : 1;
		u64 vlan_stacked : 1;
		u64 unassigned : 1;
		u64 vlan_cfi : 1;
		u64 vlan_id : 12;
		u64 pr : 4;
		u64 unassigned2a : 8;
		u64 unassigned2 : 4;
		u64 software : 1;
		u64 unassigned3 : 1;
		u64 is_rarp : 1;
		u64 is_arp : 1;
		u64 is_bcast : 1;
		u64 is_mcast : 1;
		u64 not_IP : 1;
		u64 rcv_error : 1;
		u64 err_code : 8;
	} snoip_cn38xx;
} cvmx_pip_wqe_word2_t;

typedef union {
	u64 u64;
	struct {
		u64 software : 1;
		u64 lg_hdr_type : 5;
		u64 lf_hdr_type : 5;
		u64 le_hdr_type : 5;
		u64 ld_hdr_type : 5;
		u64 lc_hdr_type : 5;
		u64 lb_hdr_type : 5;
		u64 is_la_ether : 1;
		u64 rsvd_0 : 8;
		u64 vlan_valid : 1;
		u64 vlan_stacked : 1;
		u64 stat_inc : 1;
		u64 pcam_flag4 : 1;
		u64 pcam_flag3 : 1;
		u64 pcam_flag2 : 1;
		u64 pcam_flag1 : 1;
		u64 is_frag : 1;
		u64 is_l3_bcast : 1;
		u64 is_l3_mcast : 1;
		u64 is_l2_bcast : 1;
		u64 is_l2_mcast : 1;
		u64 is_raw : 1;
		u64 err_level : 3;
		u64 err_code : 8;
	};
} cvmx_pki_wqe_word2_t;

typedef union {
	u64 u64;
	cvmx_pki_wqe_word2_t pki;
	cvmx_pip_wqe_word2_t pip;
} cvmx_wqe_word2_t;

typedef union {
	u64 u64;
	struct {
		u16 hw_chksum;
		u8 unused;
		u64 next_ptr : 40;
	} cn38xx;
	struct {
		u64 l4ptr : 8;	  /* 56..63 */
		u64 unused0 : 8;  /* 48..55 */
		u64 l3ptr : 8;	  /* 40..47 */
		u64 l2ptr : 8;	  /* 32..39 */
		u64 unused1 : 18; /* 14..31 */
		u64 bpid : 6;	  /* 8..13 */
		u64 unused2 : 2;  /* 6..7 */
		u64 pknd : 6;	  /* 0..5 */
	} cn68xx;
} cvmx_pip_wqe_word0_t;

typedef union {
	u64 u64;
	struct {
		u64 rsvd_0 : 4;
		u64 aura : 12;
		u64 rsvd_1 : 1;
		u64 apad : 3;
		u64 channel : 12;
		u64 bufs : 8;
		u64 style : 8;
		u64 rsvd_2 : 10;
		u64 pknd : 6;
	};
} cvmx_pki_wqe_word0_t;

/* Use reserved bit, set by HW to 0, to indicate buf_ptr legacy translation */
#define pki_wqe_translated word0.rsvd_1

typedef union {
	u64 u64;
	cvmx_pip_wqe_word0_t pip;
	cvmx_pki_wqe_word0_t pki;
	struct {
		u64 unused : 24;
		u64 next_ptr : 40; /* On cn68xx this is unused as well */
	} raw;
} cvmx_wqe_word0_t;

typedef union {
	u64 u64;
	struct {
		u64 len : 16;
		u64 rsvd_0 : 2;
		u64 rsvd_1 : 2;
		u64 grp : 10;
		cvmx_pow_tag_type_t tag_type : 2;
		u64 tag : 32;
	};
} cvmx_pki_wqe_word1_t;

#define pki_errata20776 word1.rsvd_0

typedef union {
	u64 u64;
	struct {
		u64 len : 16;
		u64 varies : 14;
		cvmx_pow_tag_type_t tag_type : 2;
		u64 tag : 32;
	};
	cvmx_pki_wqe_word1_t cn78xx;
	struct {
		u64 len : 16;
		u64 zero_0 : 1;
		u64 qos : 3;
		u64 zero_1 : 1;
		u64 grp : 6;
		u64 zero_2 : 3;
		cvmx_pow_tag_type_t tag_type : 2;
		u64 tag : 32;
	} cn68xx;
	struct {
		u64 len : 16;
		u64 ipprt : 6;
		u64 qos : 3;
		u64 grp : 4;
		u64 zero_2 : 1;
		cvmx_pow_tag_type_t tag_type : 2;
		u64 tag : 32;
	} cn38xx;
} cvmx_wqe_word1_t;

typedef union {
	u64 u64;
	struct {
		u64 rsvd_0 : 8;
		u64 hwerr : 8;
		u64 rsvd_1 : 24;
		u64 sqid : 8;
		u64 rsvd_2 : 4;
		u64 vfnum : 12;
	};
} cvmx_wqe_word3_t;

typedef union {
	u64 u64;
	struct {
		u64 rsvd_0 : 21;
		u64 sqfc : 11;
		u64 rsvd_1 : 5;
		u64 sqtail : 11;
		u64 rsvd_2 : 3;
		u64 sqhead : 13;
	};
} cvmx_wqe_word4_t;

/**
 * Work queue entry format.
 * Must be 8-byte aligned.
 */
typedef struct cvmx_wqe_s {
	/*-------------------------------------------------------------------*/
	/* WORD 0                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled by HW when a packet
	 * arrives.
	 */
	cvmx_wqe_word0_t word0;

	/*-------------------------------------------------------------------*/
	/* WORD 1                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled by HW when a packet
	 * arrives.
	 */
	cvmx_wqe_word1_t word1;

	/*-------------------------------------------------------------------*/
	/* WORD 2                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled in by hardware when a
	 * packet arrives. This indicates a variety of status and error
	 * conditions.
	 */
	cvmx_pip_wqe_word2_t word2;

	/* Pointer to the first segment of the packet. */
	cvmx_buf_ptr_t packet_ptr;

	/* HW WRITE: OCTEON will fill in a programmable amount from the packet,
	 * up to (at most, but perhaps less) the amount needed to fill the work
	 * queue entry to 128 bytes. If the packet is recognized to be IP, the
	 * hardware starts writing here at the start of the IP header (the
	 * IPv4 header is padded for appropriate alignment). If the packet is
	 * not recognized to be IP, the hardware starts writing from the
	 * beginning of the packet.
	 */
	u8 packet_data[96];

	/* If desired, SW can make the work queue entry any length. For the
	 * purposes of discussion here, assume 128B always, as this is all
	 * that the hardware deals with.
	 */
} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
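
/*
 * Receive-path sketch (illustrative only): the model-agnostic accessors
 * defined below are preferred over touching word0/word1/word2 fields
 * directly, since the field layout differs between Octeon models.
 *
 *	void handle_work(cvmx_wqe_t *work)
 *	{
 *		int port = cvmx_wqe_get_port(work);
 *		int len = cvmx_wqe_get_len(work);
 *
 *		debug("packet of %d bytes from port %d\n", len, port);
 *	}
 */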

/**
 * Work queue entry format for NQM.
 * Must be 8-byte aligned.
 */
typedef struct cvmx_wqe_nqm_s {
	/*-------------------------------------------------------------------*/
	/* WORD 0                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled by HW when a packet
	 * arrives.
	 */
	cvmx_wqe_word0_t word0;

	/*-------------------------------------------------------------------*/
	/* WORD 1                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled by HW when a packet
	 * arrives.
	 */
	cvmx_wqe_word1_t word1;

	/*-------------------------------------------------------------------*/
	/* WORD 2                                                            */
	/*-------------------------------------------------------------------*/
	/* Reserved */
	u64 word2;

	/*-------------------------------------------------------------------*/
	/* WORD 3                                                            */
	/*-------------------------------------------------------------------*/
	/* NVMe specific information. */
	cvmx_wqe_word3_t word3;

	/*-------------------------------------------------------------------*/
	/* WORD 4                                                            */
	/*-------------------------------------------------------------------*/
	/* NVMe specific information. */
	cvmx_wqe_word4_t word4;

	/* HW WRITE: OCTEON will fill in a programmable amount from the packet,
	 * up to (at most, but perhaps less) the amount needed to fill the work
	 * queue entry to 128 bytes. If the packet is recognized to be IP, the
	 * hardware starts writing here at the start of the IP header (the
	 * IPv4 header is padded for appropriate alignment). If the packet is
	 * not recognized to be IP, the hardware starts writing from the
	 * beginning of the packet.
	 */
	u8 packet_data[88];

	/* If desired, SW can make the work queue entry any length.
	 * For the purposes of discussion here, assume 128B always, as this is
	 * all that the hardware deals with.
	 */
} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_nqm_t;

/**
 * Work queue entry format for 78XX.
 * On the 78XX, packet data always resides in the WQE buffer unless
 * DIS_WQ_DAT=1 is set in PKI_STYLE_BUF, which causes the packet data
 * to use a separate buffer.
 *
 * Must be 8-byte aligned.
 */
typedef struct {
	/*-------------------------------------------------------------------*/
	/* WORD 0                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled by HW when a packet
	 * arrives.
	 */
	cvmx_pki_wqe_word0_t word0;

	/*-------------------------------------------------------------------*/
	/* WORD 1                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled by HW when a packet
	 * arrives.
	 */
	cvmx_pki_wqe_word1_t word1;

	/*-------------------------------------------------------------------*/
	/* WORD 2                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled in by hardware when a
	 * packet arrives. This indicates a variety of status and error
	 * conditions.
	 */
	cvmx_pki_wqe_word2_t word2;

	/*-------------------------------------------------------------------*/
	/* WORD 3                                                            */
	/*-------------------------------------------------------------------*/
	/* Pointer to the first segment of the packet. */
	cvmx_buf_ptr_pki_t packet_ptr;

	/*-------------------------------------------------------------------*/
	/* WORD 4                                                            */
	/*-------------------------------------------------------------------*/
	/* HW WRITE: the following 64 bits are filled in by hardware when a
	 * packet arrives; they contain byte pointers to the start of Layers
	 * A/B/C/D/E/F/G, relative to the start of the packet.
	 */
	cvmx_pki_wqe_word4_t word4;

	/*-------------------------------------------------------------------*/
	/* WORDs 5/6/7 may carry extended data if WQE_HSZ is set.            */
	/*-------------------------------------------------------------------*/
	u64 wqe_data[11];

} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_78xx_t;

/* Node LS-bit position in the WQE[grp] or PKI_QPG_TBL[grp_ok]. */
#define CVMX_WQE_GRP_NODE_SHIFT 8

/*
 * This is an accessor function into the WQE that retrieves the
 * ingress port number, which can also be used as a destination
 * port number for the same port.
 *
 * @param work - Work Queue Entry pointer
 * @returns returns the normalized port number, also known as "ipd" port
 */
static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		/* On 78xx the WQE carries the channel number, not the port */
		port = work->word0.pki.channel;
		/* For BGX interfaces (0x800 - 0xdff) the 4 LSBs indicate
		 * the PFC channel and must be cleared to normalize to "ipd"
		 */
		if (port & 0x800)
			port &= 0xff0;
		/* Node number is in AURA field, make it part of port # */
		port |= (work->word0.pki.aura >> 10) << 12;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		port = work->word2.s_cn68xx.port;
	} else {
		port = work->word1.cn38xx.ipprt;
	}

	return port;
}
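
/*
 * Example (illustrative): the normalized "ipd" port number can be used
 * with the helper lookups, e.g. cvmx_helper_get_interface_num() and
 * cvmx_helper_get_interface_index_num(), both assumed to come from
 * cvmx-helper.h:
 *
 *	int port = cvmx_wqe_get_port(work);
 *	int xiface = cvmx_helper_get_interface_num(port);
 *	int index = cvmx_helper_get_interface_index_num(port);
 */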

static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		work->word0.pki.channel = port;
	else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
		work->word2.s_cn68xx.port = port;
	else
		work->word1.cn38xx.ipprt = port;
}

static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
{
	int grp;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		/* legacy: GRP[0..2] :=QOS */
		grp = (0xff & work->word1.cn78xx.grp) >> 3;
	else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
		grp = work->word1.cn68xx.grp;
	else
		grp = work->word1.cn38xx.grp;

	return grp;
}

static inline void cvmx_wqe_set_xgrp(cvmx_wqe_t *work, int grp)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		work->word1.cn78xx.grp = grp;
	else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
		work->word1.cn68xx.grp = grp;
	else
		work->word1.cn38xx.grp = grp;
}

static inline int cvmx_wqe_get_xgrp(cvmx_wqe_t *work)
{
	int grp;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		grp = work->word1.cn78xx.grp;
	else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
		grp = work->word1.cn68xx.grp;
	else
		grp = work->word1.cn38xx.grp;

	return grp;
}

static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		unsigned int node = cvmx_get_node_num();
		/* Legacy: GRP[0..2] :=QOS */
		work->word1.cn78xx.grp &= 0x7;
		work->word1.cn78xx.grp |= 0xff & (grp << 3);
		work->word1.cn78xx.grp |= (node << 8);
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		work->word1.cn68xx.grp = grp;
	} else {
		work->word1.cn38xx.grp = grp;
	}
}

static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
{
	int qos;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		/* Legacy: GRP[0..2] :=QOS */
		qos = work->word1.cn78xx.grp & 0x7;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		qos = work->word1.cn68xx.qos;
	} else {
		qos = work->word1.cn38xx.qos;
	}

	return qos;
}

static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		/* legacy: GRP[0..2] :=QOS */
		work->word1.cn78xx.grp &= ~0x7;
		work->word1.cn78xx.grp |= qos & 0x7;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		work->word1.cn68xx.qos = qos;
	} else {
		work->word1.cn38xx.qos = qos;
	}
}

static inline int cvmx_wqe_get_len(cvmx_wqe_t *work)
{
	int len;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		len = work->word1.cn78xx.len;
	else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
		len = work->word1.cn68xx.len;
	else
		len = work->word1.cn38xx.len;

	return len;
}

static inline void cvmx_wqe_set_len(cvmx_wqe_t *work, int len)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		work->word1.cn78xx.len = len;
	else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
		work->word1.cn68xx.len = len;
	else
		work->word1.cn38xx.len = len;
}

/**
 * This function reports whether L1/L2 errors were detected in the packet.
 *
 * @param work	pointer to work queue entry
 *
 * @return	0 if the packet had no error, non-zero to indicate the error code.
 *
 * Please refer to the HRM for the specific model for a full enumeration
 * of error codes.
 * With Octeon1/Octeon2 models, the returned code indicates L1/L2 errors.
 * On CN73XX/CN78XX, the return code is the value of PKI_OPCODE_E,
 * if it is non-zero, otherwise the returned code will be derived from
 * PKI_ERRLEV_E such that an error indicated in LayerA will return 0x20,
 * LayerB - 0x30, LayerC - 0x40 and so forth.
 */
static inline int cvmx_wqe_get_rcv_err(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_RE || wqe->word2.err_code != 0)
			return wqe->word2.err_code;
		else
			return (wqe->word2.err_level << 4) + 0x10;
	} else if (work->word2.snoip.rcv_error) {
		return work->word2.snoip.err_code;
	}

	return 0;
}
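
/*
 * Error-handling sketch (illustrative; the drop policy is the caller's
 * choice). cvmx_helper_free_packet_data() is assumed to come from
 * cvmx-helper.h; cvmx_wqe_free() is declared later in this file.
 *
 *	int err = cvmx_wqe_get_rcv_err(work);
 *
 *	if (err) {
 *		debug("dropping packet, rx error code 0x%x\n", err);
 *		cvmx_helper_free_packet_data(work);
 *		cvmx_wqe_free(work);
 *		return;
 *	}
 */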

static inline u32 cvmx_wqe_get_tag(cvmx_wqe_t *work)
{
	return work->word1.tag;
}

static inline void cvmx_wqe_set_tag(cvmx_wqe_t *work, u32 tag)
{
	work->word1.tag = tag;
}

static inline int cvmx_wqe_get_tt(cvmx_wqe_t *work)
{
	return work->word1.tag_type;
}

static inline void cvmx_wqe_set_tt(cvmx_wqe_t *work, int tt)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		work->word1.cn78xx.tag_type = (cvmx_pow_tag_type_t)tt;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		work->word1.cn68xx.tag_type = (cvmx_pow_tag_type_t)tt;
		work->word1.cn68xx.zero_2 = 0;
	} else {
		work->word1.cn38xx.tag_type = (cvmx_pow_tag_type_t)tt;
		work->word1.cn38xx.zero_2 = 0;
	}
}

static inline u8 cvmx_wqe_get_unused8(cvmx_wqe_t *work)
{
	u8 bits;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		bits = wqe->word2.rsvd_0;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		bits = work->word0.pip.cn68xx.unused1;
	} else {
		bits = work->word0.pip.cn38xx.unused;
	}

	return bits;
}

static inline void cvmx_wqe_set_unused8(cvmx_wqe_t *work, u8 v)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		wqe->word2.rsvd_0 = v;
	} else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
		work->word0.pip.cn68xx.unused1 = v;
	} else {
		work->word0.pip.cn38xx.unused = v;
	}
}

static inline u8 cvmx_wqe_get_user_flags(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return work->word0.pki.rsvd_2;
	else
		return 0;
}

static inline void cvmx_wqe_set_user_flags(cvmx_wqe_t *work, u8 v)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		work->word0.pki.rsvd_2 = v;
}

static inline int cvmx_wqe_get_channel(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return (work->word0.pki.channel);
	else
		return cvmx_wqe_get_port(work);
}

static inline void cvmx_wqe_set_channel(cvmx_wqe_t *work, int channel)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		work->word0.pki.channel = channel;
	else
		debug("%s: ERROR: not supported for model\n", __func__);
}

static inline int cvmx_wqe_get_aura(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return (work->word0.pki.aura);
	else
		return (work->packet_ptr.s.pool);
}

static inline void cvmx_wqe_set_aura(cvmx_wqe_t *work, int aura)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		work->word0.pki.aura = aura;
	else
		work->packet_ptr.s.pool = aura;
}

static inline int cvmx_wqe_get_style(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return (work->word0.pki.style);
	return 0;
}

static inline void cvmx_wqe_set_style(cvmx_wqe_t *work, int style)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		work->word0.pki.style = style;
}

static inline int cvmx_wqe_is_l3_ip(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
		/* Match all 4 values for v4/v6 with/without options */
		if ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
			return 1;
		if ((wqe->word2.le_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
			return 1;
		return 0;
	} else {
		return !work->word2.s_cn38xx.not_IP;
	}
}

static inline int cvmx_wqe_is_l3_ipv4(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
		/* Match 2 values - with/without options */
		if ((wqe->word2.lc_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP4)
			return 1;
		if ((wqe->word2.le_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP4)
			return 1;
		return 0;
	} else {
		return (!work->word2.s_cn38xx.not_IP &&
			!work->word2.s_cn38xx.is_v6);
	}
}

static inline int cvmx_wqe_is_l3_ipv6(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
		/* Match 2 values - with/without options */
		if ((wqe->word2.lc_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP6)
			return 1;
		if ((wqe->word2.le_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP6)
			return 1;
		return 0;
	} else {
		return (!work->word2.s_cn38xx.not_IP &&
			work->word2.s_cn38xx.is_v6);
	}
}

static inline bool cvmx_wqe_is_l4_udp_or_tcp(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (wqe->word2.lf_hdr_type == CVMX_PKI_LTYPE_E_TCP)
			return true;
		if (wqe->word2.lf_hdr_type == CVMX_PKI_LTYPE_E_UDP)
			return true;
		return false;
	}

	if (work->word2.s_cn38xx.not_IP)
		return false;

	return (work->word2.s_cn38xx.tcp_or_udp != 0);
}

static inline int cvmx_wqe_is_l2_bcast(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return wqe->word2.is_l2_bcast;
	} else {
		return work->word2.s_cn38xx.is_bcast;
	}
}

static inline int cvmx_wqe_is_l2_mcast(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return wqe->word2.is_l2_mcast;
	} else {
		return work->word2.s_cn38xx.is_mcast;
	}
}

static inline void cvmx_wqe_set_l2_bcast(cvmx_wqe_t *work, bool bcast)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		wqe->word2.is_l2_bcast = bcast;
	} else {
		work->word2.s_cn38xx.is_bcast = bcast;
	}
}

static inline void cvmx_wqe_set_l2_mcast(cvmx_wqe_t *work, bool mcast)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		wqe->word2.is_l2_mcast = mcast;
	} else {
		work->word2.s_cn38xx.is_mcast = mcast;
	}
}

static inline int cvmx_wqe_is_l3_bcast(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return wqe->word2.is_l3_bcast;
	}
	debug("%s: ERROR: not supported for model\n", __func__);
	return 0;
}

static inline int cvmx_wqe_is_l3_mcast(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return wqe->word2.is_l3_mcast;
	}
	debug("%s: ERROR: not supported for model\n", __func__);
	return 0;
}

/**
 * This function reports whether an IP error was detected in the packet.
 * For 78XX it does not flag IPv4 options and IPv6 extension headers.
 * For older chips, if PIP_GBL_CTL was provisioned to flag IPv4 options
 * and IPv6 extension headers, they will be flagged.
 * @param work	pointer to work queue entry
 * @return	1 -- If an IP error was found in the packet
 *          0 -- If no IP error was found in the packet.
 */
static inline int cvmx_wqe_is_ip_exception(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_LC)
			return 1;
		else
			return 0;
	}

	return work->word2.s.IP_exc;
}

static inline int cvmx_wqe_is_l4_error(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_LF)
			return 1;
		else
			return 0;
	} else {
		return work->word2.s.L4_error;
	}
}

static inline void cvmx_wqe_set_vlan(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		wqe->word2.vlan_valid = set;
	} else {
		work->word2.s.vlan_valid = set;
	}
}

static inline int cvmx_wqe_is_vlan(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return wqe->word2.vlan_valid;
	} else {
		return work->word2.s.vlan_valid;
	}
}

static inline int cvmx_wqe_is_vlan_stacked(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return wqe->word2.vlan_stacked;
	} else {
		return work->word2.s.vlan_stacked;
	}
}

/**
 * Extract packet data buffer pointer from work queue entry.
 *
 * Returns the legacy (Octeon1/Octeon2) buffer pointer structure
 * for the linked buffer list.
 * On CN78XX, the native buffer pointer structure is converted into
 * the legacy format.
 * The legacy buf_ptr is then stored in the WQE, and the word0 reserved
 * field is set to indicate that the buffer pointers were translated.
 * If the packet data is only found inside the work queue entry,
 * a standard buffer pointer structure is created for it.
 */
cvmx_buf_ptr_t cvmx_wqe_get_packet_ptr(cvmx_wqe_t *work);
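
/*
 * Segment-walk sketch (illustrative) using the legacy buffer-pointer
 * view. The "next pointer stored 8 bytes before the data" layout is
 * assumed from the legacy PIP/IPD buffer-link convention:
 *
 *	cvmx_buf_ptr_t seg = cvmx_wqe_get_packet_ptr(work);
 *	int i, nsegs = cvmx_wqe_get_bufs(work);
 *
 *	for (i = 0; i < nsegs; i++) {
 *		void *data = cvmx_phys_to_ptr(seg.s.addr);
 *
 *		... process seg.s.size bytes at data ...
 *		if (i + 1 < nsegs)
 *			seg = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(seg.s.addr - 8);
 *	}
 */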

static inline int cvmx_wqe_get_bufs(cvmx_wqe_t *work)
{
	int bufs;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		bufs = work->word0.pki.bufs;
	} else {
		/* Adjust for packet-in-WQE cases */
		if (cvmx_unlikely(work->word2.s_cn38xx.bufs == 0 && !work->word2.s.software))
			(void)cvmx_wqe_get_packet_ptr(work);
		bufs = work->word2.s_cn38xx.bufs;
	}
	return bufs;
}

/**
 * Free Work Queue Entry memory
 *
 * Will return the WQE buffer to its pool, unless the WQE contains
 * non-redundant packet data.
 * This function is intended to be called AFTER the packet data
 * has been passed along to PKO for transmission and release.
 * It can also follow a call to cvmx_helper_free_packet_data()
 * to release the WQE after the associated data was released.
 */
void cvmx_wqe_free(cvmx_wqe_t *work);

/**
 * Check if a work entry has been initiated by software
 *
 */
static inline bool cvmx_wqe_is_soft(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return wqe->word2.software;
	} else {
		return work->word2.s.software;
	}
}

/**
 * Allocate a work-queue entry for delivering software-initiated
 * event notifications.
 * The application data is copied into the work-queue entry,
 * if the space is sufficient.
 */
cvmx_wqe_t *cvmx_wqe_soft_create(void *data_p, unsigned int data_sz);
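
/*
 * Software-event sketch (illustrative; struct my_event and the tag
 * value are hypothetical). The soft WQE can be submitted like any
 * other work, e.g. with cvmx_pow_work_submit(), assumed to come from
 * cvmx-pow.h:
 *
 *	struct my_event ev = { .code = 1 };
 *	cvmx_wqe_t *work = cvmx_wqe_soft_create(&ev, sizeof(ev));
 *
 *	if (work)
 *		cvmx_pow_work_submit(work, 0x1234, CVMX_POW_TAG_TYPE_ORDERED,
 *				     cvmx_wqe_get_qos(work),
 *				     cvmx_wqe_get_grp(work));
 */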

/* Errata (PKI-20776) PKI_BUFLINK_S's are endian-swapped.
 * CN78XX pass 1.x has a bug where the packet pointer in each segment is
 * written in the opposite endianness of the configured mode. Fix these here.
 */
static inline void cvmx_wqe_pki_errata_20776(cvmx_wqe_t *work)
{
	cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && !wqe->pki_errata20776) {
		u64 bufs;
		cvmx_buf_ptr_pki_t buffer_next;

		bufs = wqe->word0.bufs;
		buffer_next = wqe->packet_ptr;
		while (bufs > 1) {
			cvmx_buf_ptr_pki_t next;
			void *nextaddr = cvmx_phys_to_ptr(buffer_next.addr - 8);

			memcpy(&next, nextaddr, sizeof(next));
			next.u64 = __builtin_bswap64(next.u64);
			memcpy(nextaddr, &next, sizeof(next));
			buffer_next = next;
			bufs--;
		}
		wqe->pki_errata20776 = 1;
	}
}

/**
 * @INTERNAL
 *
 * Extract the native PKI-specific buffer pointer from WQE.
 *
 * NOTE: Provisional, may be superseded.
 */
static inline cvmx_buf_ptr_pki_t cvmx_wqe_get_pki_pkt_ptr(cvmx_wqe_t *work)
{
	cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_buf_ptr_pki_t x = { 0 };
		return x;
	}

	cvmx_wqe_pki_errata_20776(work);
	return wqe->packet_ptr;
}

/**
 * Set the buffer segment count for a packet.
 *
 * @return Returns the actual resulting value in the WQE field
 *
 */
static inline unsigned int cvmx_wqe_set_bufs(cvmx_wqe_t *work, unsigned int bufs)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		work->word0.pki.bufs = bufs;
		return work->word0.pki.bufs;
	}

	work->word2.s.bufs = bufs;
	return work->word2.s.bufs;
}

/**
 * Get the offset of the Layer-3 header,
 * only supported when the Layer-3 protocol is IPv4 or IPv6.
 *
 * @return Returns the offset, or 0 if the offset is not known or unsupported.
 *
 * FIXME: Assuming word4 is present.
 */
static inline unsigned int cvmx_wqe_get_l3_offset(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
		/* Match 4 values: IPv4/v6 w/wo options */
		if ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
			return wqe->word4.ptr_layer_c;
	} else {
		return work->word2.s.ip_offset;
	}

	return 0;
}

/**
 * Set the offset of the Layer-3 header in a packet.
 * Typically used when an IP packet is generated by software
 * or when the Layer-2 header length is modified, and
 * a subsequent recalculation of checksums is anticipated.
 *
 * @return Returns the actual value of the work entry offset field.
 *
 * FIXME: Assuming word4 is present.
 */
static inline unsigned int cvmx_wqe_set_l3_offset(cvmx_wqe_t *work, unsigned int ip_off)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
		/* Match 4 values: IPv4/v6 w/wo options */
		if ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
			wqe->word4.ptr_layer_c = ip_off;
	} else {
		work->word2.s.ip_offset = ip_off;
	}

	return cvmx_wqe_get_l3_offset(work);
}
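
/*
 * Sketch (illustrative): marking up a software-generated IPv4/UDP
 * packet so downstream consumers see consistent classification fields.
 * The offset of 14 assumes an untagged Ethernet L2 header; the setters
 * are defined below.
 *
 *	cvmx_wqe_set_l3_ipv4(work, true);
 *	cvmx_wqe_set_l3_offset(work, 14);
 *	cvmx_wqe_set_l4_udp(work, true);
 */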

/**
 * Set the indication that the packet contains an IPv4 Layer-3 header.
 * Use 'cvmx_wqe_set_l3_ipv6()' if the protocol is IPv6.
 * When 'set' is false, the call will result in an indication
 * that the Layer-3 protocol is neither IPv4 nor IPv6.
 *
 * FIXME: Add IPV4_OPT handling based on L3 header length.
 */
static inline void cvmx_wqe_set_l3_ipv4(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (set)
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_IP4;
		else
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
	} else {
		work->word2.s.not_IP = !set;
		if (set)
			work->word2.s_cn38xx.is_v6 = 0;
	}
}

/**
 * Set packet Layer-3 protocol to IPv6.
 *
 * FIXME: Add IPV6_OPT handling based on presence of extended headers.
 */
static inline void cvmx_wqe_set_l3_ipv6(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (set)
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_IP6;
		else
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
	} else {
		work->word2.s_cn38xx.not_IP = !set;
		if (set)
			work->word2.s_cn38xx.is_v6 = 1;
	}
}

/**
 * Set a packet Layer-4 protocol type to UDP.
 */
static inline void cvmx_wqe_set_l4_udp(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (set)
			wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_UDP;
		else
			wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_NONE;
	} else {
		if (!work->word2.s_cn38xx.not_IP)
			work->word2.s_cn38xx.tcp_or_udp = set;
	}
}

/**
 * Set a packet Layer-4 protocol type to TCP.
 */
static inline void cvmx_wqe_set_l4_tcp(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (set)
			wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_TCP;
		else
			wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_NONE;
	} else {
		if (!work->word2.s_cn38xx.not_IP)
			work->word2.s_cn38xx.tcp_or_udp = set;
	}
}

/**
 * Set the "software" flag in a work entry.
 */
static inline void cvmx_wqe_set_soft(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		wqe->word2.software = set;
	} else {
		work->word2.s.software = set;
	}
}

/**
 * Return true if the packet is an IP fragment.
 */
static inline bool cvmx_wqe_is_l3_frag(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return (wqe->word2.is_frag != 0);
	}

	if (!work->word2.s_cn38xx.not_IP)
		return (work->word2.s.is_frag != 0);

	return false;
}

/**
 * Set the indicator that the packet is a fragmented IP packet.
 */
static inline void cvmx_wqe_set_l3_frag(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		wqe->word2.is_frag = set;
	} else {
		if (!work->word2.s_cn38xx.not_IP)
			work->word2.s.is_frag = set;
	}
}

/**
 * Set the packet Layer-3 protocol to RARP.
 */
static inline void cvmx_wqe_set_l3_rarp(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (set)
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_RARP;
		else
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
	} else {
		work->word2.snoip.is_rarp = set;
	}
}

/**
 * Set the packet Layer-3 protocol to ARP.
 */
static inline void cvmx_wqe_set_l3_arp(cvmx_wqe_t *work, bool set)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		if (set)
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_ARP;
		else
			wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
	} else {
		work->word2.snoip.is_arp = set;
	}
}

/**
 * Return true if the packet Layer-3 protocol is ARP.
 */
static inline bool cvmx_wqe_is_l3_arp(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

		return (wqe->word2.lc_hdr_type == CVMX_PKI_LTYPE_E_ARP);
	}

	if (work->word2.s_cn38xx.not_IP)
		return (work->word2.snoip.is_arp != 0);

	return false;
}

#endif /* __CVMX_WQE_H__ */