/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information :
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 * Shreyank Amartya <Shreyank.Amartya@amd.com>
 *
 */

#include <sys/cdefs.h>
#include "xgbe.h"
#include "xgbe-common.h"

/*
 * IFLIB interfaces
 */
static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);

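/*
 * Descriptor ops table registered with iflib. iflib invokes these callbacks
 * per hardware queue: the txd_* hooks encapsulate, doorbell and reclaim Tx
 * descriptors, while the rxd_* hooks refill, poll and harvest Rx
 * descriptors. No legacy (INTx) interrupt handler is provided.
 */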
struct if_txrx axgbe_txrx = {
	.ift_txd_encap = axgbe_isc_txd_encap,
	.ift_txd_flush = axgbe_isc_txd_flush,
	.ift_txd_credits_update = axgbe_isc_txd_credits_update,
	.ift_rxd_available = axgbe_isc_rxd_available,
	.ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = axgbe_isc_rxd_refill,
	.ift_rxd_flush = axgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

static void
xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi)
{

	axgbe_printf(1, "------Packet Info Start------\n");
	axgbe_printf(1, "pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
	    pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
	axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
	    pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
	axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
	    pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
	axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
	    pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
}

static bool
axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
    if_pkt_info_t pi)
{
	struct xgbe_ring_desc	*rdesc;
	struct xgbe_ring_data	*rdata;
	bool inc_cur = false;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
	    pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);

	axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
	    pi->ipi_vtag, ring->tx.cur_vlan_ctag);

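	/*
	 * The MAC latches the MSS and VLAN tag programmed by a context
	 * descriptor, so one only needs to be written when either value
	 * changes. Returning true tells the caller that a descriptor slot
	 * was consumed by this context descriptor.
	 */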
	if ((pi->ipi_csum_flags & CSUM_TSO) &&
	    (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
		/*
		 * Set TSO maximum segment size
		 * Mark as context descriptor
		 * Indicate this descriptor contains MSS
		 */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
		    MSS, pi->ipi_tso_segsz);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
		ring->tx.cur_mss = pi->ipi_tso_segsz;
		inc_cur = true;
	}

	if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
		/*
		 * Mark it as context descriptor
		 * Set the VLAN tag
		 * Indicate this descriptor contains the VLAN tag
		 */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
		    VT, pi->ipi_vtag);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
		ring->tx.cur_vlan_ctag = pi->ipi_vtag;
		inc_cur = true;
	}

	return (inc_cur);
}

static uint16_t
axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi,
    struct xgbe_packet_data *packet)
{
	uint32_t tcp_payload_len = 0, bytes = 0;
	uint16_t max_len, hlen, payload_len, pkts = 0;

	packet->tx_packets = packet->tx_bytes = 0;

	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
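
	/*
	 * For TSO, simulate the hardware segmentation: each wire frame
	 * carries up to (MTU + Ethernet header [+ VLAN]) bytes, of which
	 * hlen bytes are the replicated headers. The resulting frame and
	 * byte counts feed the Tx interrupt coalescing decision in
	 * axgbe_isc_txd_encap().
	 */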
	if (pi->ipi_csum_flags & CSUM_TSO) {

		tcp_payload_len = pi->ipi_len - hlen;
		axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
		    __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
		    pi->ipi_tcp_hlen);

		max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
		if (pi->ipi_vtag)
			max_len += VLAN_HLEN;

		while (tcp_payload_len) {

			payload_len = max_len - hlen;
			payload_len = min(payload_len, tcp_payload_len);
			tcp_payload_len -= payload_len;
			pkts++;
			bytes += (hlen + payload_len);
			axgbe_printf(1, "%s: max_len %d payload_len %d "
			    "tcp_len %d\n", __func__, max_len, payload_len,
			    tcp_payload_len);
		}
	} else {
		pkts = 1;
		bytes = pi->ipi_len;
	}

	packet->tx_packets = pkts;
	packet->tx_bytes = bytes;

	axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
	    packet->tx_packets, packet->tx_bytes, hlen);

	return (hlen);
}

static int
axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel;
	struct xgbe_ring	*ring;
	struct xgbe_ring_desc	*rdesc;
	struct xgbe_ring_data	*rdata;
	struct xgbe_packet_data *packet;
	unsigned int cur, start, tx_set_ic;
	uint16_t offset, hlen, datalen, tcp_payload_len = 0;
	int cur_seg = 0;

	xgbe_print_pkt_info(pdata, pi);

	channel = pdata->channel[pi->ipi_qsidx];
	ring = channel->tx_ring;
	packet = &ring->packet_data;
	cur = start = ring->cur;

	axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
	    __func__, pi->ipi_qsidx, ring->cur, ring->dirty);

	MPASS(pi->ipi_len != 0);
	if (__predict_false(pi->ipi_len == 0)) {
		axgbe_error("empty packet received from stack\n");
		return (0);
	}

	MPASS(ring->cur == pi->ipi_pidx);
	if (__predict_false(ring->cur != pi->ipi_pidx)) {
		axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
		    ring->cur, pi->ipi_pidx);
	}

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	memset(packet, 0, sizeof(*packet));
	hlen = axgbe_calculate_tx_parms(pdata, pi, packet);
	axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
	    __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);

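	/*
	 * ring->coalesce_count accumulates transmitted frames; the modulo
	 * test below detects when this packet's frames cross a multiple of
	 * tx_frames, at which point an interrupt is requested.
	 */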
	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets))
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	/* Add Context descriptor if needed (for TSO, VLAN cases) */
	if (axgbe_ctx_desc_setup(pdata, ring, pi))
		cur++;

	rdata = XGBE_GET_DESC_DATA(ring, cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
	    "ipi_len 0x%x\n", __func__, cur,
	    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));

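	/*
	 * A zero hlen means the stack supplied no header lengths (no
	 * checksum or TSO offload was requested), so fall back to the
	 * length of the first segment.
	 */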
	/* Update the buffer length */
	if (hlen == 0)
		hlen = pi->ipi_segs[cur_seg].ds_len;
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);

	/* VLAN tag insertion check */
	if (pi->ipi_vtag) {
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
		    TX_NORMAL_DESC2_VLAN_INSERT);
	}

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/*
	 * Set the OWN bit if this is not the first descriptor. For the
	 * first descriptor, the OWN bit is set last so that hardware will
	 * process the descriptors only after the OWN bit for the first
	 * descriptor is set.
	 */
	if (cur != start)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);

		tcp_payload_len = pi->ipi_len - hlen;

		/* Set TCP payload length */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
		    tcp_payload_len);

		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
		    pi->ipi_tcp_hlen/4);

		axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
		    pi->ipi_tcp_hlen/4);
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (pi->ipi_csum_flags)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

		/* Set total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
	}

	cur++;

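	/*
	 * Map the remaining payload. The first segment skips the hlen
	 * header bytes already described by the first descriptor; each
	 * later segment is mapped in full. Segments that contribute no
	 * data consume no descriptor.
	 */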
	for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) {

		if (cur_seg == 0) {
			offset = hlen;
			datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
		} else {
			offset = 0;
			datalen = pi->ipi_segs[cur_seg].ds_len;
		}

		if (datalen) {
			rdata = XGBE_GET_DESC_DATA(ring, cur);
			rdesc = rdata->rdesc;

			/* Update buffer address */
			rdesc->desc0 =
			    cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
			rdesc->desc1 =
			    cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));

			/* Update the buffer length */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);

			/* Set OWN bit */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

			/* Mark it as NORMAL descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

			/* Enable HW CSUM */
			if (pi->ipi_csum_flags)
				XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

			axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
			    "ipi_len 0x%x\n", __func__, cur,
			    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

			cur++;
		}
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

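	/* Make all descriptor writes visible before transferring ownership. */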
	wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

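	/* Ring sizes are powers of two, so masking wraps the producer index. */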
	ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));

	axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
	    ring->dirty);

	return (0);
}

static void
axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel = pdata->channel[txqid];
	struct xgbe_ring	*ring = channel->tx_ring;
	struct xgbe_ring_data	*rdata = XGBE_GET_DESC_DATA(ring, pidx);

	axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
	    __func__, txqid, pidx, ring->cur, ring->dirty);

	/*
	 * Ring the doorbell: writing the channel's Tx descriptor tail
	 * pointer tells the DMA engine to process the newly posted
	 * descriptors.
	 */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}

static int
axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_hw_if	*hw_if = &sc->pdata.hw_if;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel = pdata->channel[txqid];
	struct xgbe_ring	*ring = channel->tx_ring;
	struct xgbe_ring_data	*rdata;
	int processed = 0;

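	/*
	 * iflib calls this with clear == false to ask whether completed
	 * descriptors exist, and with clear == true to reclaim them; the
	 * return value is the number of descriptors reclaimed.
	 */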
	axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
	    __func__, txqid, clear, ring->cur, ring->dirty);

	if (__predict_false(ring->cur == ring->dirty)) {
		axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
		    __func__, ring->cur, ring->dirty);
		return (0);
	}

	/* Check whether the first dirty descriptor is Tx complete */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	if (!hw_if->tx_complete(rdata->rdesc)) {
		axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
		return (0);
	}

	/*
	 * If clear is false just let the caller know that there
	 * are descriptors to reclaim
	 */
	if (!clear) {
		axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
		return (1);
	}

	do {
		hw_if->tx_desc_reset(rdata);
		processed++;
		ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);

		/*
		 * tx_complete() also returns true for descriptors the
		 * hardware never used, so only walk descriptors up to
		 * ring->cur.
		 */
		if (ring->cur == ring->dirty)
			break;

		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	} while (hw_if->tx_complete(rdata->rdesc));

	axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
	    processed, ring->cur, ring->dirty);

	return (processed);
}

static void
axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel = pdata->channel[iru->iru_qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_ring_data	*rdata;
	struct xgbe_ring_desc	*rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	uint8_t	count = iru->iru_count;
	int i, j;
	bool config_intr = false;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
	    "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
	    iru->iru_pidx, count, ring->cur, ring->dirty);

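	/*
	 * With split header (sph) enabled, each Rx descriptor is fed from
	 * two freelists: freelist 0 supplies the header buffer (desc0/1)
	 * and freelist 1 the data buffer (desc2/3). The INTE/OWN bits are
	 * only written once the data buffer is in place, so a descriptor
	 * is never handed to hardware half-filled.
	 */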
	for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) {

		if (i == sc->scctx->isc_nrxd[0])
			i = 0;

		rdata = XGBE_GET_DESC_DATA(ring, i);
		rdesc = rdata->rdesc;

		if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
		    RX_NORMAL_DESC3, OWN))) {
			axgbe_error("%s: refill clash, cur %d dirty %d index %d "
			    "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
		}

		if (pdata->sph_enable) {
			if (iru->iru_flidx == 0) {

				/* Fill header/buffer1 address */
				rdesc->desc0 =
				    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
				rdesc->desc1 =
				    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
			} else {

				/* Fill data/buffer2 address */
				rdesc->desc2 =
				    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
				rdesc->desc3 =
				    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));

				config_intr = true;
			}
		} else {
			/* Fill header/buffer1 address */
			rdesc->desc0 = rdesc->desc2 =
			    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
			rdesc->desc1 = rdesc->desc3 =
			    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));

			config_intr = true;
		}

		if (config_intr) {

			if (!rx_usecs && !rx_frames) {
				/* No coalescing, interrupt for every descriptor */
				inte = 1;
			} else {
				/* Set interrupt based on Rx frame coalescing setting */
				if (rx_frames && !((ring->dirty + 1) % rx_frames))
					inte = 1;
				else
					inte = 0;
			}

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

			wmb();

			ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));

			config_intr = false;
		}
	}

	axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
	    channel->queue_index, ring->cur, ring->dirty);
}

static void
axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel = pdata->channel[qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_ring_data	*rdata;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
	    __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);

	rdata = XGBE_GET_DESC_DATA(ring, pidx);

	/*
	 * Update the Rx descriptor tail pointer in hardware to indicate
	 * that new buffers are present in the allocated memory region.
	 * With split header enabled, wait for the data freelist (flidx 1)
	 * flush so that descriptors are complete before they are exposed.
	 */
	if (!pdata->sph_enable || flidx == 1) {
		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
		    lower_32_bits(rdata->rdata_paddr));
	}
}

static int
axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_channel	*channel = pdata->channel[qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_ring_data	*rdata;
	struct xgbe_ring_desc	*rdesc;
	unsigned int cur;
	int count = 0;
	uint8_t incomplete = 1, context_next = 0, running = 0;

	axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
	    __func__, qsidx, idx, budget, ring->cur, ring->dirty);

	if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
		axgbe_printf(0, "%s: Polling when XGBE_DOWN\n", __func__);
		return (count);
	}

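	/*
	 * Walk the ring from ring->cur until a descriptor still owned by
	 * hardware is found. A packet is counted as available only when
	 * its last descriptor (LD) has arrived and no trailing context
	 * descriptor is still pending.
	 */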
	cur = ring->cur;
	for (count = 0; count <= budget; ) {

		rdata = XGBE_GET_DESC_DATA(ring, cur);
		rdesc = rdata->rdesc;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
			break;

		running = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
			incomplete = 0;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
			context_next = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
			context_next = 0;

		cur = (cur + 1) & (ring->rdesc_count - 1);

		if (incomplete || context_next)
			continue;

		/* Increment pkt count & reset variables for next full packet */
		count++;
		incomplete = 1;
		context_next = 0;
		running = 0;
	}

	axgbe_printf(1, "<-- %s: rxq %d cur %d incomp %d con_next %d running %d "
	    "count %d\n", __func__, qsidx, cur, incomplete, context_next,
	    running, count);

	return (count);
}

static unsigned int
xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet)
{
	unsigned int ret = 0;

	if (pdata->sph_enable) {
		/* Always zero if not the first descriptor */
		if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
			axgbe_printf(1, "%s: Not First\n", __func__);
			return (0);
		}
	}

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len) {
		axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
		return (rdata->rx.hdr_len);
	}

	/*
	 * First descriptor but not the last and no split header, so the
	 * full buffer was used. 256 is the hardware's fixed maximum
	 * split-header size.
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__,
		    pdata->rx_buf_size);
		if (pdata->sph_enable) {
			return (256);
		} else {
			return (pdata->rx_buf_size);
		}
	}

	/*
	 * First and last descriptor with no split header, so calculate
	 * how much of the buffer was used: either the capped header size
	 * or the remaining bytes of the packet.
	 */
	axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
	    pdata->rx_buf_size);

	if (pdata->sph_enable) {
		ret = min_t(unsigned int, 256, rdata->rx.len);
	} else {
		ret = rdata->rx.len;
	}

	return (ret);
}

static unsigned int
xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet, unsigned int len)
{

	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
		return (pdata->rx_buf_size);
	}

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return ((rdata->rx.len != 0) ? (rdata->rx.len - len) : 0);
}

static inline void
axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len,
    int pos, int flid)
{
	axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
	ri->iri_frags[pos].irf_flid = flid;
	ri->iri_frags[pos].irf_idx = idx;
	ri->iri_frags[pos].irf_len = len;
}

static int
axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct axgbe_if_softc	*sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data	*pdata = &sc->pdata;
	struct xgbe_hw_if	*hw_if = &pdata->hw_if;
	struct xgbe_channel	*channel = pdata->channel[ri->iri_qsidx];
	struct xgbe_ring	*ring = channel->rx_ring;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct xgbe_ring_data	*rdata;
	unsigned int last, context_next, context;
	unsigned int buf1_len, buf2_len, len = 0, prev_cur;
	int i = 0;

	axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
	    ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);

	memset(packet, 0, sizeof(struct xgbe_packet_data));

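	/*
	 * dev_read() parses the next completed descriptor into
	 * ring->packet_data. Loop across descriptors (including any
	 * trailing context descriptor) until the packet's last descriptor
	 * has been consumed, recording each buffer as an iflib fragment.
	 */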
	while (1) {

read_again:
		if (hw_if->dev_read(channel)) {
			axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
			    __func__, ring->cur);
			break;
		}

		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		prev_cur = ring->cur;
		ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST);

		context_next = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT_NEXT);

		context = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT);

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet);
			len += buf1_len;
			if (pdata->sph_enable) {
				buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len);
				len += buf2_len;
			}
		} else
			buf1_len = buf2_len = 0;

		if (packet->errors)
			axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
			    "buf2 %d len %d frags %d error %d\n", __func__, last, context,
			    context_next, buf1_len, buf2_len, len, i, packet->errors);

		axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0);
		i++;
		if (pdata->sph_enable) {
			axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
			i++;
		}

		if (!last || context_next)
			goto read_again;

		break;
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
		axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
		ri->iri_flags |= M_VLANTAG;
		ri->iri_vtag = packet->vlan_ctag;
		axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__,
		    ri->iri_flags, ri->iri_vtag);
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
		ri->iri_flowid = packet->rss_hash;
		ri->iri_rsstype = packet->rss_hash_type;
		axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n",
		    __func__, packet->rss_hash, ri->iri_flowid,
		    packet->rss_hash_type, ri->iri_rsstype);
	}

	if (__predict_false(len == 0))
		axgbe_printf(1, "%s: Discarding Zero len packet\n", __func__);

	if (__predict_false(packet->errors))
		axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
		    "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
		    ri->iri_cidx, ring->cur, ring->dirty, packet->errors);

	axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);

	ri->iri_len = len;
	ri->iri_nfrags = i;

	return (0);
}
801