xref: /dragonfly/sys/dev/netif/oce/oce_queue.c (revision 7d84b73d)
1 /*-
2  * Copyright (C) 2013 Emulex
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the Emulex Corporation nor the names of its
16  *    contributors may be used to endorse or promote products derived from
17  *    this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Contact Information:
32  * freebsd-drivers@emulex.com
33  *
34  * Emulex
35  * 3333 Susan Street
36  * Costa Mesa, CA 92626
37  */
38 
39 
40 
41 /* $FreeBSD: src/sys/dev/oce/oce_queue.c,v 1.5 2013/07/07 00:30:13 svnexp Exp $ */
42 
43 
44 #include "oce_if.h"
45 
46 /*****************************************************
47  * local queue functions
48  *****************************************************/
49 
50 static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
51 				  uint32_t q_len, uint32_t wq_type);
52 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
53 static void oce_wq_free(struct oce_wq *wq);
54 static void oce_wq_del(struct oce_wq *wq);
55 static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
56 				  uint32_t q_len,
57 				  uint32_t frag_size,
58 				  uint32_t mtu, uint32_t rss);
59 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
60 static void oce_rq_free(struct oce_rq *rq);
61 static void oce_rq_del(struct oce_rq *rq);
62 static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
63 				    uint32_t q_len,
64 				    uint32_t item_size,
65 				    uint32_t eq_delay,
66 				    uint32_t vector);
67 static void oce_eq_del(struct oce_eq *eq);
68 static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
69 				    struct oce_eq *eq, uint32_t q_len);
70 static void oce_mq_free(struct oce_mq *mq);
71 static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
72 			 *mbx, size_t req_size, enum qtype qtype);
73 struct oce_cq *oce_cq_create(POCE_SOFTC sc,
74 			     struct oce_eq *eq,
75 			     uint32_t q_len,
76 			     uint32_t item_size,
77 			     uint32_t sol_event,
78 			     uint32_t is_eventable,
79 			     uint32_t nodelay, uint32_t ncoalesce);
80 static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
81 
82 
83 
84 /**
85  * @brief	Create and initialize all the queues on the board
86  * @param sc	software handle to the device
87  * @returns 0	if successful, or error
88  **/
89 int
90 oce_queue_init_all(POCE_SOFTC sc)
91 {
92 	int rc = 0, i, vector;
93 	struct oce_wq *wq;
94 	struct oce_rq *rq;
95 	struct oce_aic_obj *aic;
96 
97 	/* alloc TX/RX queues */
98 	for_all_wq_queues(sc, wq, i) {
99 		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
100 					 NIC_WQ_TYPE_STANDARD);
101 		if (!sc->wq[i])
102 			goto error;
103 
104 	}
105 
106 	for_all_rq_queues(sc, rq, i) {
107 		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
108 					OCE_MAX_JUMBO_FRAME_SIZE,
109 					(i == 0) ? 0 : is_rss_enabled(sc));
110 		if (!sc->rq[i])
111 			goto error;
112 	}
113 
114 	/* Create network interface on card */
115 	if (oce_create_nw_interface(sc))
116 		goto error;
117 
118 	/* create all of the event queues */
119 	for (vector = 0; vector < sc->intr_count; vector++) {
120 		/* setup aic defaults for each event queue */
121 		aic = &sc->aic_obj[vector];
122 		aic->max_eqd = OCE_MAX_EQD;
123 		aic->min_eqd = OCE_MIN_EQD;
124 		aic->et_eqd = OCE_MIN_EQD;
125 		aic->enable = TRUE;
126 
127 		sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
128 						 0, vector);
129 		if (!sc->eq[vector])
130 			goto error;
131 	}
132 
133 	/* create Tx, Rx and mcc queues */
134 	for_all_wq_queues(sc, wq, i) {
135 		rc = oce_wq_create(wq, sc->eq[i]);
136 		if (rc)
137 			goto error;
138 		wq->queue_index = i;
139 		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
140 	}
141 
142 	for_all_rq_queues(sc, rq, i) {
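		/* RQ 0 is the default (non-RSS) queue and shares EQ 0; RSS RQ i uses eq[i-1] */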
143 		rc = oce_rq_create(rq, sc->if_id,
144 					sc->eq[(i == 0) ? 0:(i-1)]);
145 		if (rc)
146 			goto error;
147 		rq->queue_index = i;
148 	}
149 
150 	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
151 	if (!sc->mq)
152 		goto error;
153 
154 	return rc;
155 
156 error:
157 	oce_queue_release_all(sc);
158 	return 1;
159 }
160 
161 
162 
163 /**
164  * @brief Releases all the queues created (WQ, RQ, MQ and EQ)
165  * @param sc		software handle to the device
166  */
167 void
168 oce_queue_release_all(POCE_SOFTC sc)
169 {
170 	int i = 0;
171 	struct oce_wq *wq;
172 	struct oce_rq *rq;
173 	struct oce_eq *eq;
174 
175 	for_all_rq_queues(sc, rq, i) {
176 		if (rq) {
177 			oce_rq_del(sc->rq[i]);
178 			oce_rq_free(sc->rq[i]);
179 		}
180 	}
181 
182 	for_all_wq_queues(sc, wq, i) {
183 		if (wq) {
184 			oce_wq_del(sc->wq[i]);
185 			oce_wq_free(sc->wq[i]);
186 		}
187 	}
188 
189 	if (sc->mq)
190 		oce_mq_free(sc->mq);
191 
192 	for_all_evnt_queues(sc, eq, i) {
193 		if (eq)
194 			oce_eq_del(sc->eq[i]);
195 	}
196 }
197 
198 
199 
200 /**
201  * @brief 		Function to create a WQ for NIC Tx
202  * @param sc 		software handle to the device
203  * @param q_len	number of entries in the queue
204  * @param wq_type	work queue type
205  * @returns		the pointer to the WQ created or NULL on failure
206  */
207 static struct oce_wq *
208 oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
209 {
210 	struct oce_wq *wq;
211 	int rc = 0, i;
212 
213 	/* q_len must be min 256 and max 2k */
214 	if (q_len < 256 || q_len > 2048) {
215 		device_printf(sc->dev,
216 			  "Invalid q length. Must be "
217 			  "[256, 2048]: 0x%x\n", q_len);
218 		return NULL;
219 	}
220 
221 	/* allocate wq */
222 	wq = kmalloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
223 	if (!wq)
224 		return NULL;
225 
226 	/* Set the wq config */
227 	wq->cfg.q_len = q_len;
228 	wq->cfg.wq_type = (uint8_t) wq_type;
229 	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
230 	wq->cfg.nbufs = 2 * wq->cfg.q_len;
231 	wq->cfg.nhdl = 2 * wq->cfg.q_len;
232 
233 	wq->parent = (void *)sc;
234 
235 	rc = bus_dma_tag_create(NULL,
236 				1, 0,
237 				BUS_SPACE_MAXADDR,
238 				BUS_SPACE_MAXADDR,
239 				OCE_MAX_TX_SIZE,
240 				OCE_MAX_TX_ELEMENTS,
241 				PAGE_SIZE, 0, &wq->tag);
242 
243 	if (rc)
244 		goto free_wq;
245 
246 
247 	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
248 		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
249 		if (rc)
250 			goto free_wq;
251 	}
252 
253 	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
254 	if (!wq->ring)
255 		goto free_wq;
256 
257 
258 	LOCK_CREATE(&wq->tx_lock, "TX_lock");
259 
260 #if 0 /* XXX swildner: MULTIQUEUE */
261 	/* Allocate buf ring for multiqueue*/
262 	wq->br = buf_ring_alloc(4096, M_DEVBUF,
263 			M_WAITOK, &wq->tx_lock.mutex);
264 	if (!wq->br)
265 		goto free_wq;
266 #endif
267 	return wq;
268 
269 
270 free_wq:
271 	device_printf(sc->dev, "Create WQ failed\n");
272 	oce_wq_free(wq);
273 	return NULL;
274 }
275 
276 
277 
278 /**
279  * @brief 		Frees the work queue
280  * @param wq		pointer to work queue to free
281  */
282 static void
283 oce_wq_free(struct oce_wq *wq)
284 {
285 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
286 	int i;
287 
288 	taskqueue_drain(taskqueue_swi, &wq->txtask);
289 
290 	if (wq->ring != NULL) {
291 		oce_destroy_ring_buffer(sc, wq->ring);
292 		wq->ring = NULL;
293 	}
294 
295 	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
296 		if (wq->pckts[i].map != NULL) {
297 			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
298 			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
299 			wq->pckts[i].map = NULL;
300 		}
301 	}
302 
303 	if (wq->tag != NULL)
304 		bus_dma_tag_destroy(wq->tag);
305 #if 0 /* XXX swildner: MULTIQUEUE */
306 	if (wq->br != NULL)
307 		buf_ring_free(wq->br, M_DEVBUF);
308 #endif
309 
310 	LOCK_DESTROY(&wq->tx_lock);
311 	kfree(wq, M_DEVBUF);
312 }
313 
314 
315 
316 /**
317  * @brief 		Create a work queue
318  * @param wq		pointer to work queue
319  * @param eq		pointer to associated event queue
320  */
321 static int
322 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
323 {
324 	POCE_SOFTC sc = wq->parent;
325 	struct oce_cq *cq;
326 	int rc = 0;
327 
328 	/* create the CQ */
329 	cq = oce_cq_create(sc,
330 			   eq,
331 			   CQ_LEN_1024,
332 			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
333 	if (!cq)
334 		return ENXIO;
335 
336 
337 	wq->cq = cq;
338 
339 	rc = oce_mbox_create_wq(wq);
340 	if (rc)
341 		goto error;
342 
343 	wq->qstate = QCREATED;
344 	wq->wq_free = wq->cfg.q_len;
345 	wq->ring->cidx = 0;
346 	wq->ring->pidx = 0;
347 
348 	eq->cq[eq->cq_valid] = cq;
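	/* Register the CQ with its EQ and attach the TX completion handler */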
349 	eq->cq_valid++;
350 	cq->cb_arg = wq;
351 	cq->cq_handler = oce_wq_handler;
352 
353 	return 0;
354 
355 error:
356 	device_printf(sc->dev, "WQ create failed\n");
357 	oce_wq_del(wq);
358 	return rc;
359 }
360 
361 
362 
363 
364 /**
365  * @brief 		Delete a work queue
366  * @param wq		pointer to work queue
367  */
368 static void
369 oce_wq_del(struct oce_wq *wq)
370 {
371 	struct oce_mbx mbx;
372 	struct mbx_delete_nic_wq *fwcmd;
373 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
374 
375 	if (wq->qstate == QCREATED) {
376 		bzero(&mbx, sizeof(struct oce_mbx));
377 		/* now fill the command */
378 		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
379 		fwcmd->params.req.wq_id = wq->wq_id;
380 		(void)oce_destroy_q(sc, &mbx,
381 				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
382 		wq->qstate = QDELETED;
383 	}
384 
385 	if (wq->cq != NULL) {
386 		oce_cq_del(sc, wq->cq);
387 		wq->cq = NULL;
388 	}
389 }
390 
391 
392 
393 /**
394  * @brief 		function to allocate receive queue resources
395  * @param sc		software handle to the device
396  * @param q_len		length of receive queue
397  * @param frag_size	size of a receive queue fragment
398  * @param mtu		maximum transmission unit
399  * @param rss		is-rss-queue flag
400  * @returns		the pointer to the RQ created or NULL on failure
401  */
402 static struct oce_rq *
403 oce_rq_init(POCE_SOFTC sc,
404 	    uint32_t q_len,
405 	    uint32_t frag_size,
406 	    uint32_t mtu, uint32_t rss)
407 {
408 	struct oce_rq *rq;
409 	int rc = 0, i;
410 
411 	if (OCE_LOG2(frag_size) <= 0)
412 		return NULL;
413 
414 	if ((q_len == 0) || (q_len > 1024))
415 		return NULL;
416 
417 	/* allocate the rq */
418 	rq = kmalloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
419 	if (!rq)
420 		return NULL;
421 
422 
423 	rq->cfg.q_len = q_len;
424 	rq->cfg.frag_size = frag_size;
425 	rq->cfg.mtu = mtu;
426 	rq->cfg.eqd = 0;
427 #if 0 /* XXX swildner: LRO */
428 	rq->lro_pkts_queued = 0;
429 #endif
430 	rq->cfg.is_rss_queue = rss;
431 	rq->packets_in = 0;
432 	rq->packets_out = 0;
433 	rq->pending = 0;
434 
435 	rq->parent = (void *)sc;
436 
437 	rc = bus_dma_tag_create(NULL,
438 				1, 0,
439 				BUS_SPACE_MAXADDR,
440 				BUS_SPACE_MAXADDR,
441 				OCE_MAX_RX_SIZE,
442 				1, PAGE_SIZE, 0, &rq->tag);
443 
444 	if (rc)
445 		goto free_rq;
446 
447 	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
448 		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
449 		if (rc)
450 			goto free_rq;
451 	}
452 
453 	/* create the ring buffer */
454 	rq->ring = oce_create_ring_buffer(sc, q_len,
455 				 sizeof(struct oce_nic_rqe));
456 	if (!rq->ring)
457 		goto free_rq;
458 
459 	LOCK_CREATE(&rq->rx_lock, "RX_lock");
460 
461 	return rq;
462 
463 free_rq:
464 	device_printf(sc->dev, "Create RQ failed\n");
465 	oce_rq_free(rq);
466 	return NULL;
467 }
468 
469 
470 
471 
472 /**
473  * @brief 		Free a receive queue
474  * @param rq		pointer to receive queue
475  */
476 static void
477 oce_rq_free(struct oce_rq *rq)
478 {
479 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
480 	int i = 0;
481 
482 	if (rq->ring != NULL) {
483 		oce_destroy_ring_buffer(sc, rq->ring);
484 		rq->ring = NULL;
485 	}
486 	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
487 		if (rq->pckts[i].map != NULL) {
488 			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
489 			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
490 			rq->pckts[i].map = NULL;
491 		}
492 		if (rq->pckts[i].mbuf) {
493 			m_free(rq->pckts[i].mbuf);
494 			rq->pckts[i].mbuf = NULL;
495 		}
496 	}
497 
498 	if (rq->tag != NULL)
499 		bus_dma_tag_destroy(rq->tag);
500 
501 	LOCK_DESTROY(&rq->rx_lock);
502 	kfree(rq, M_DEVBUF);
503 }
504 
505 
506 
507 
508 /**
509  * @brief 		Create a receive queue
510  * @param rq 		receive queue
511  * @param if_id		interface identifier index
512  * @param eq		pointer to event queue
513  */
514 static int
515 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
516 {
517 	POCE_SOFTC sc = rq->parent;
518 	struct oce_cq *cq;
519 
520 	cq = oce_cq_create(sc,
521 			   eq,
522 			   CQ_LEN_1024,
523 			   sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
524 	if (!cq)
525 		return ENXIO;
526 
527 	rq->cq = cq;
528 	rq->cfg.if_id = if_id;
529 
530 	/* Dont create RQ here. Create in if_activate */
531 	rq->qstate     = 0;
532 	rq->ring->cidx = 0;
533 	rq->ring->pidx = 0;
534 	eq->cq[eq->cq_valid] = cq;
535 	eq->cq_valid++;
536 	cq->cb_arg = rq;
537 	cq->cq_handler = oce_rq_handler;
538 
539 	return 0;
540 
541 }
542 
543 
544 
545 
546 /**
547  * @brief 		Delete a receive queue
548  * @param rq		receive queue
549  */
550 static void
551 oce_rq_del(struct oce_rq *rq)
552 {
553 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
554 	struct oce_mbx mbx;
555 	struct mbx_delete_nic_rq *fwcmd;
556 
557 	if (rq->qstate == QCREATED) {
558 		bzero(&mbx, sizeof(mbx));
559 
560 		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
561 		fwcmd->params.req.rq_id = rq->rq_id;
562 		(void)oce_destroy_q(sc, &mbx,
563 				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
564 		rq->qstate = QDELETED;
565 	}
566 
567 	if (rq->cq != NULL) {
568 		oce_cq_del(sc, rq->cq);
569 		rq->cq = NULL;
570 	}
571 }
572 
573 
574 
575 /**
576  * @brief		function to create an event queue
577  * @param sc		software handle to the device
578  * @param q_len		length of event queue
579  * @param item_size	size of an event queue item
580  * @param eq_delay	event queue delay
581  * @retval eq      	success, pointer to event queue
582  * @retval NULL		failure
583  */
584 static struct oce_eq *
585 oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
586 	      uint32_t item_size,
587 	      uint32_t eq_delay,
588 	      uint32_t vector)
589 {
590 	struct oce_eq *eq;
591 	int rc = 0;
592 
593 	/* allocate an eq */
594 	eq = kmalloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
595 	if (eq == NULL)
596 		return NULL;
597 
598 	eq->parent = (void *)sc;
599 	eq->eq_id = 0xffff;
600 	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
601 	if (!eq->ring)
602 		goto free_eq;
603 
604 	eq->eq_cfg.q_len = q_len;
605 	eq->eq_cfg.item_size = item_size;
606 	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
607 
608 	rc = oce_mbox_create_eq(eq);
609 	if (rc)
610 		goto free_eq;
611 
612 	sc->intrs[sc->neqs++].eq = eq;
613 
614 	return eq;
615 
616 free_eq:
617 	oce_eq_del(eq);
618 	return NULL;
619 }
620 
621 
622 
623 
624 /**
625  * @brief 		Function to delete an event queue
626  * @param eq		pointer to an event queue
627  */
628 static void
629 oce_eq_del(struct oce_eq *eq)
630 {
631 	struct oce_mbx mbx;
632 	struct mbx_destroy_common_eq *fwcmd;
633 	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
634 
635 	if (eq->eq_id != 0xffff) {
636 		bzero(&mbx, sizeof(mbx));
637 		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
638 		fwcmd->params.req.id = eq->eq_id;
639 		(void)oce_destroy_q(sc, &mbx,
640 			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
641 	}
642 
643 	if (eq->ring != NULL) {
644 		oce_destroy_ring_buffer(sc, eq->ring);
645 		eq->ring = NULL;
646 	}
647 
648 	kfree(eq, M_DEVBUF);
649 
650 }
651 
652 
653 
654 
655 /**
656  * @brief		Function to create an MQ
657  * @param sc		software handle to the device
658  * @param eq		the EQ to associate with the MQ for event notification
659  * @param q_len		the number of entries to create in the MQ
660  * @returns		pointer to the created MQ, or NULL on failure
661  */
662 static struct oce_mq *
663 oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
664 {
665 	struct oce_mbx mbx;
666 	struct mbx_create_common_mq_ex *fwcmd = NULL;
667 	struct oce_mq *mq = NULL;
668 	int rc = 0;
669 	struct oce_cq *cq;
670 	oce_mq_ext_ctx_t *ctx;
671 	uint32_t num_pages;
672 	uint32_t page_size;
673 	int version;
674 
675 	cq = oce_cq_create(sc, eq, CQ_LEN_256,
676 			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
677 	if (!cq)
678 		return NULL;
679 
680 	/* allocate the mq */
681 	mq = kmalloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
682 	if (!mq) {
683 		oce_cq_del(sc, cq);
684 		goto error;
685 	}
686 
687 	mq->parent = sc;
688 
689 	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
690 	if (!mq->ring)
691 		goto error;
692 
693 	bzero(&mbx, sizeof(struct oce_mbx));
694 
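	/* XE201 adapters take the v1 create-MQ context format; others use v0 */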
695 	version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
696 	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
697 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
698 				MBX_SUBSYSTEM_COMMON,
699 				OPCODE_COMMON_CREATE_MQ_EXT,
700 				MBX_TIMEOUT_SEC,
701 				sizeof(struct mbx_create_common_mq_ex),
702 				version);
703 
704 	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
705 	page_size = mq->ring->num_items * mq->ring->item_size;
706 
707 	ctx = &fwcmd->params.req.context;
708 
709 	if (IS_XE201(sc)) {
710 		ctx->v1.num_pages = num_pages;
711 		ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
712 		ctx->v1.cq_id = cq->cq_id;
713 		ctx->v1.valid = 1;
714 		ctx->v1.async_cq_id = cq->cq_id;
715 		ctx->v1.async_cq_valid = 1;
716 		/* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */
717 		ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
718 		ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
719 		ctx->v1.async_evt_bitmap |=
720 					LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
721 	}
722 	else {
723 		ctx->v0.num_pages = num_pages;
724 		ctx->v0.cq_id = cq->cq_id;
725 		ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
726 		ctx->v0.valid = 1;
727 		/* Subscribe to all asynchronous events */
728 		ctx->v0.async_evt_bitmap = 0xffffffff;
729 	}
730 
731 	mbx.u0.s.embedded = 1;
732 	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
733 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
734 
735 	rc = oce_mbox_post(sc, &mbx, NULL);
736 	if (!rc)
737 		rc = fwcmd->hdr.u0.rsp.status;
738 	if (rc) {
739 		device_printf(sc->dev,"%s failed - cmd status: %d\n",
740 			      __FUNCTION__, rc);
741 		goto error;
742 	}
743 	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
744 	mq->cq = cq;
745 	eq->cq[eq->cq_valid] = cq;
746 	eq->cq_valid++;
747 	mq->cq->eq = eq;
748 	mq->cfg.q_len = (uint8_t) q_len;
749 	mq->cfg.eqd = 0;
750 	mq->qstate = QCREATED;
751 
752 	mq->cq->cb_arg = mq;
753 	mq->cq->cq_handler = oce_mq_handler;
754 
755 	return mq;
756 
757 error:
758 	device_printf(sc->dev, "MQ create failed\n");
759 	oce_mq_free(mq);
760 	mq = NULL;
761 	return mq;
762 }
763 
764 
765 
766 
767 
768 /**
769  * @brief		Function to free a mailbox queue
770  * @param mq		pointer to a mailbox queue
771  */
772 static void
773 oce_mq_free(struct oce_mq *mq)
774 {
775 	POCE_SOFTC sc;
776 	struct oce_mbx mbx;
777 	struct mbx_destroy_common_mq *fwcmd;
778 
779 	if (!mq)
780 		return;
781 	sc = (POCE_SOFTC) mq->parent;
782 	if (mq->ring != NULL) {
783 		oce_destroy_ring_buffer(sc, mq->ring);
784 		mq->ring = NULL;
785 		if (mq->qstate == QCREATED) {
786 			bzero(&mbx, sizeof (struct oce_mbx));
787 			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
788 			fwcmd->params.req.id = mq->mq_id;
789 			(void) oce_destroy_q(sc, &mbx,
790 				sizeof (struct mbx_destroy_common_mq),
791 				QTYPE_MQ);
792 		}
793 		mq->qstate = QDELETED;
794 	}
795 
796 	if (mq->cq != NULL) {
797 		oce_cq_del(sc, mq->cq);
798 		mq->cq = NULL;
799 	}
800 
801 	kfree(mq, M_DEVBUF);
802 	mq = NULL;
803 }
804 
805 
806 
807 /**
808  * @brief		Function to delete an EQ, CQ, MQ, WQ or RQ
809  * @param sc		software handle to the device
810  * @param mbx		mailbox command to send to the fw to delete the queue
811  *			(mbx contains the queue information to delete)
812  * @param req_size	the size of the mbx payload dependent on the qtype
813  * @param qtype		the type of queue i.e. EQ, CQ, MQ, WQ or RQ
814  * @returns 		0 on success, failure otherwise
815  */
816 static int
817 oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
818 		enum qtype qtype)
819 {
820 	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
821 	int opcode;
822 	int subsys;
823 	int rc = 0;
824 
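	/* Select the destroy opcode and mailbox subsystem for this queue type */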
825 	switch (qtype) {
826 	case QTYPE_EQ:
827 		opcode = OPCODE_COMMON_DESTROY_EQ;
828 		subsys = MBX_SUBSYSTEM_COMMON;
829 		break;
830 	case QTYPE_CQ:
831 		opcode = OPCODE_COMMON_DESTROY_CQ;
832 		subsys = MBX_SUBSYSTEM_COMMON;
833 		break;
834 	case QTYPE_MQ:
835 		opcode = OPCODE_COMMON_DESTROY_MQ;
836 		subsys = MBX_SUBSYSTEM_COMMON;
837 		break;
838 	case QTYPE_WQ:
839 		opcode = NIC_DELETE_WQ;
840 		subsys = MBX_SUBSYSTEM_NIC;
841 		break;
842 	case QTYPE_RQ:
843 		opcode = NIC_DELETE_RQ;
844 		subsys = MBX_SUBSYSTEM_NIC;
845 		break;
846 	default:
847 		return EINVAL;
848 	}
849 
850 	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
851 				opcode, MBX_TIMEOUT_SEC, req_size,
852 				OCE_MBX_VER_V0);
853 
854 	mbx->u0.s.embedded = 1;
855 	mbx->payload_length = (uint32_t) req_size;
856 	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
857 
858 	rc = oce_mbox_post(sc, mbx, NULL);
859 	if (!rc)
860 		rc = hdr->u0.rsp.status;
861 	if (rc)
862 		device_printf(sc->dev,"%s failed - cmd status: %d\n",
863 			      __FUNCTION__, rc);
864 	return rc;
865 }
866 
867 
868 
869 /**
870  * @brief		Function to create a completion queue
871  * @param sc		software handle to the device
872  * @param eq		optional EQ to be associated with the CQ
873  * @param q_len		length of completion queue
874  * @param item_size	size of completion queue items
875  * @param sol_event	solicited event flag
876  * @param is_eventable	whether the CQ generates events on its EQ
877  * @param nodelay	no delay flag
878  * @param ncoalesce	number of CQEs to coalesce
879  * @returns 		pointer to the cq created, NULL on failure
880  */
881 struct oce_cq *
882 oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
883 			     uint32_t q_len,
884 			     uint32_t item_size,
885 			     uint32_t sol_event,
886 			     uint32_t is_eventable,
887 			     uint32_t nodelay, uint32_t ncoalesce)
888 {
889 	struct oce_cq *cq = NULL;
890 	int rc = 0;
891 
892 	cq = kmalloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
893 	if (!cq)
894 		return NULL;
895 
896 	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
897 	if (!cq->ring)
898 		goto error;
899 
900 	cq->parent = sc;
901 	cq->eq = eq;
902 	cq->cq_cfg.q_len = q_len;
903 	cq->cq_cfg.item_size = item_size;
904 	cq->cq_cfg.nodelay = (uint8_t) nodelay;
905 
906 	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
907 	if (rc)
908 		goto error;
909 
910 	sc->cq[sc->ncqs++] = cq;
911 
912 	return cq;
913 
914 error:
915 	device_printf(sc->dev, "CQ create failed\n");
916 	oce_cq_del(sc, cq);
917 	return NULL;
918 }
919 
920 
921 
922 /**
923  * @brief		Deletes the completion queue
924  * @param sc		software handle to the device
925  * @param cq		pointer to a completion queue
926  */
927 static void
928 oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
929 {
930 	struct oce_mbx mbx;
931 	struct mbx_destroy_common_cq *fwcmd;
932 
933 	if (cq->ring != NULL) {
934 
935 		bzero(&mbx, sizeof(struct oce_mbx));
936 		/* now fill the command */
937 		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
938 		fwcmd->params.req.id = cq->cq_id;
939 		(void)oce_destroy_q(sc, &mbx,
940 			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
941 		/* Now destroy the ring */
942 		oce_destroy_ring_buffer(sc, cq->ring);
943 		cq->ring = NULL;
944 	}
945 
946 	kfree(cq, M_DEVBUF);
947 	cq = NULL;
948 }
949 
950 
951 
952 /**
953  * @brief		Start a receive queue
954  * @param rq		pointer to a receive queue
955  */
956 int
957 oce_start_rq(struct oce_rq *rq)
958 {
959 	int rc;
960 
961 	rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
962 
963 	if (rc == 0)
964 		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
965 	return rc;
966 }
967 
968 
969 
970 /**
971  * @brief		Start a work queue
972  * @param wq		pointer to a work queue
973  */
974 int
975 oce_start_wq(struct oce_wq *wq)
976 {
977 	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
978 	return 0;
979 }
980 
981 
982 
983 /**
984  * @brief		Start a mailbox queue
985  * @param mq		pointer to a mailbox queue
986  */
987 int
988 oce_start_mq(struct oce_mq *mq)
989 {
990 	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
991 	return 0;
992 }
993 
994 
995 
996 /**
997  * @brief		Function to arm an EQ so that it can generate events
998  * @param sc		software handle to the device
999  * @param qid		id of the EQ returned by the fw at the time of creation
1000  * @param npopped	number of EQEs popped (processed) to acknowledge
1001  * @param rearm		rearm bit enable/disable
1002  * @param clearint	set to clear the interrupt condition that caused the
1003  *			EQEs to be generated
1004  */
1005 void
1006 oce_arm_eq(POCE_SOFTC sc,
1007 	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
1008 {
1009 	eq_db_t eq_db = { 0 };
1010 
1011 	eq_db.bits.rearm = rearm;
1012 	eq_db.bits.event = 1;
1013 	eq_db.bits.num_popped = npopped;
1014 	eq_db.bits.clrint = clearint;
1015 	eq_db.bits.qid = qid;
1016 	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
1017 
1018 }
1019 
1020 
1021 
1022 
1023 /**
1024  * @brief		Function to arm a CQ with CQEs
1025  * @param sc		software handle to the device
1026  * @param qid		id of the CQ returned by the fw at the time of creation
1027  * @param npopped	number of CQEs popped (processed) to acknowledge
1028  * @param rearm		rearm bit enable/disable
1029  */
1030 void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
1031 {
1032 	cq_db_t cq_db = { 0 };
1033 
1034 	cq_db.bits.rearm = rearm;
1035 	cq_db.bits.num_popped = npopped;
1036 	cq_db.bits.event = 0;
1037 	cq_db.bits.qid = qid;
1038 	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
1039 
1040 }
1041 
1042 
1043 
1044 
1045 /*
1046  * @brief		function to drain an EQ of pending EQEs during stop
1047  * @param eq		pointer to the event queue to drain
1048  * @returns		nothing; consumed EQEs are acknowledged via oce_arm_eq()
1049  */
1050 void
1051 oce_drain_eq(struct oce_eq *eq)
1052 {
1053 
1054 	struct oce_eqe *eqe;
1055 	uint16_t num_eqe = 0;
1056 	POCE_SOFTC sc = eq->parent;
1057 
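	/* Consume and clear pending EQEs until an empty (evnt == 0) entry is reached */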
1058 	do {
1059 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1060 		if (eqe->evnt == 0)
1061 			break;
1062 		eqe->evnt = 0;
1063 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
1064 					BUS_DMASYNC_POSTWRITE);
1065 		num_eqe++;
1066 		RING_GET(eq->ring, 1);
1067 
1068 	} while (TRUE);
1069 
1070 	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
1071 
1072 }
1073 
1074 
1075 
1076 void
1077 oce_drain_wq_cq(struct oce_wq *wq)
1078 {
1079 	POCE_SOFTC sc = wq->parent;
1080 	struct oce_cq *cq = wq->cq;
1081 	struct oce_nic_tx_cqe *cqe;
1082 	int num_cqes = 0;
1083 
1084 	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1085 				 BUS_DMASYNC_POSTWRITE);
1086 
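	/* Discard pending TX CQEs without processing them; dw[3] == 0 marks an empty entry */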
1087 	do {
1088 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1089 		if (cqe->u0.dw[3] == 0)
1090 			break;
1091 		cqe->u0.dw[3] = 0;
1092 		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1093 				 BUS_DMASYNC_POSTWRITE);
1094 		RING_GET(cq->ring, 1);
1095 		num_cqes++;
1096 
1097 	} while (TRUE);
1098 
1099 	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1100 
1101 }
1102 
1103 
1104 /*
1105  * @brief		function to drain an MQ's CQ and process its CQEs
1106  * @param arg		opaque callback argument (unused; the function is
1107  *			currently a stub)
1108  * @returns		nothing
1109  */
1110 void
1111 oce_drain_mq_cq(void *arg)
1112 {
1113 	/* TODO: additional code. */
1114 	return;
1115 }
1116 
1117 
1118 
1119 /**
1120  * @brief		function to drain a receive queue's completion queue
1121  * @param rq		pointer to the RQ whose CQ is to be drained
1122  * @return		nothing; consumed CQEs are acknowledged via oce_arm_cq()
1123  */
1124 void
1125 oce_drain_rq_cq(struct oce_rq *rq)
1126 {
1127 	struct oce_nic_rx_cqe *cqe;
1128 	uint16_t num_cqe = 0;
1129 	struct oce_cq  *cq;
1130 	POCE_SOFTC sc;
1131 
1132 	sc = rq->parent;
1133 	cq = rq->cq;
1134 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1135 	/* dequeue till you reach an invalid cqe */
1136 	while (RQ_CQE_VALID(cqe)) {
1137 		RQ_CQE_INVALIDATE(cqe);
1138 		RING_GET(cq->ring, 1);
1139 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
1140 		    struct oce_nic_rx_cqe);
1141 		num_cqe++;
1142 	}
1143 	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
1144 
1145 	return;
1146 }
1147 
1148 
1149 void
1150 oce_free_posted_rxbuf(struct oce_rq *rq)
1151 {
1152 	struct oce_packet_desc *pd;
1153 
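	/* Unmap and free every RX buffer still posted, starting at the consumer index */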
1154 	while (rq->pending) {
1155 
1156 		pd = &rq->pckts[rq->packets_out];
1157 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1158 		bus_dmamap_unload(rq->tag, pd->map);
1159 		if (pd->mbuf != NULL) {
1160 			m_freem(pd->mbuf);
1161 			pd->mbuf = NULL;
1162 		}
1163 
1164 		if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
1165 			rq->packets_out = 0;
1166 		else
1167 			rq->packets_out++;
1168 
1169 		rq->pending--;
1170 	}
1171 
1172 }
1173 
1174 void
1175 oce_stop_rx(POCE_SOFTC sc)
1176 {
1177 	struct oce_mbx mbx;
1178 	struct mbx_delete_nic_rq *fwcmd;
1179 	struct oce_rq *rq;
1180 	int i = 0;
1181 
1182 	for_all_rq_queues(sc, rq, i) {
1183 		if (rq->qstate == QCREATED) {
1184 			/* Delete rxq in firmware */
1185 
1186 			bzero(&mbx, sizeof(mbx));
1187 			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1188 			fwcmd->params.req.rq_id = rq->rq_id;
1189 
1190 			(void)oce_destroy_q(sc, &mbx,
1191 				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
1192 
1193 			rq->qstate = QDELETED;
1194 
1195 			DELAY(1);
1196 
1197 			/* Free posted RX buffers that are not used */
1198 			oce_free_posted_rxbuf(rq);
1199 
1200 		}
1201 	}
1202 }
1203 
1204 
1205 
1206 int
1207 oce_start_rx(POCE_SOFTC sc)
1208 {
1209 	struct oce_rq *rq;
1210 	int rc = 0, i;
1211 
1212 	for_all_rq_queues(sc, rq, i) {
1213 		if (rq->qstate == QCREATED)
1214 			continue;
1215 		rc = oce_mbox_create_rq(rq);
1216 		if (rc)
1217 			goto error;
1218 		/* reset queue pointers */
1219 		rq->qstate 	 = QCREATED;
1220 		rq->pending	 = 0;
1221 		rq->ring->cidx	 = 0;
1222 		rq->ring->pidx	 = 0;
1223 		rq->packets_in	 = 0;
1224 		rq->packets_out	 = 0;
1225 	}
1226 
1227 	DELAY(1);
1228 
1229 	/* RSS config */
1230 	if (is_rss_enabled(sc)) {
1231 		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1232 		if (rc)
1233 			goto error;
1234 
1235 	}
1236 
1237 	return rc;
1238 error:
1239 	device_printf(sc->dev, "Start RX failed\n");
1240 	return rc;
1241 
1242 }
1243