xref: /dragonfly/sys/dev/netif/oce/oce_queue.c (revision 73610d44)
1 /*-
2  * Copyright (C) 2013 Emulex
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the Emulex Corporation nor the names of its
16  *    contributors may be used to endorse or promote products derived from
17  *    this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Contact Information:
32  * freebsd-drivers@emulex.com
33  *
34  * Emulex
35  * 3333 Susan Street
36  * Costa Mesa, CA 92626
37  */
38 
39 
40 
41 /* $FreeBSD: src/sys/dev/oce/oce_queue.c,v 1.5 2013/07/07 00:30:13 svnexp Exp $ */
42 
43 
44 #include "oce_if.h"
45 
46 /*****************************************************
47  * local queue functions
48  *****************************************************/
49 
50 static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
51 				  uint32_t q_len, uint32_t wq_type);
52 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
53 static void oce_wq_free(struct oce_wq *wq);
54 static void oce_wq_del(struct oce_wq *wq);
55 static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
56 				  uint32_t q_len,
57 				  uint32_t frag_size,
58 				  uint32_t mtu, uint32_t rss);
59 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
60 static void oce_rq_free(struct oce_rq *rq);
61 static void oce_rq_del(struct oce_rq *rq);
62 static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
63 				    uint32_t q_len,
64 				    uint32_t item_size,
65 				    uint32_t eq_delay,
66 				    uint32_t vector);
67 static void oce_eq_del(struct oce_eq *eq);
68 static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
69 				    struct oce_eq *eq, uint32_t q_len);
70 static void oce_mq_free(struct oce_mq *mq);
71 static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx,
72 			 size_t req_size, enum qtype qtype);
73 struct oce_cq *oce_cq_create(POCE_SOFTC sc,
74 			     struct oce_eq *eq,
75 			     uint32_t q_len,
76 			     uint32_t item_size,
77 			     uint32_t sol_event,
78 			     uint32_t is_eventable,
79 			     uint32_t nodelay, uint32_t ncoalesce);
80 static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
81 
82 
83 
84 /**
85  * @brief	Create and initialize all the queues on the board
86  * @param sc	software handle to the device
87  * @returns 0	if successful, or error
88  **/
89 int
90 oce_queue_init_all(POCE_SOFTC sc)
91 {
92 	int rc = 0, i, vector;
93 	struct oce_wq *wq;
94 	struct oce_rq *rq;
95 	struct oce_aic_obj *aic;
96 
97 	/* alloc TX/RX queues */
98 	for_all_wq_queues(sc, wq, i) {
99 		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
100 					 NIC_WQ_TYPE_STANDARD);
101 		if (!sc->wq[i])
102 			goto error;
103 
104 	}
105 
106 	for_all_rq_queues(sc, rq, i) {
107 		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
108 					OCE_MAX_JUMBO_FRAME_SIZE,
109 					(i == 0) ? 0 : is_rss_enabled(sc));
110 		if (!sc->rq[i])
111 			goto error;
112 	}
113 
114 	/* Create network interface on card */
115 	if (oce_create_nw_interface(sc))
116 		goto error;
117 
118 	/* create all of the event queues */
119 	for (vector = 0; vector < sc->intr_count; vector++) {
120 		/* setup aic defaults for each event queue */
121 		aic = &sc->aic_obj[vector];
122 		aic->max_eqd = OCE_MAX_EQD;
123 		aic->min_eqd = OCE_MIN_EQD;
124 		aic->et_eqd = OCE_MIN_EQD;
125 		aic->enable = TRUE;
126 
127 		sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
128 						 0, vector);
129 		if (!sc->eq[vector])
130 			goto error;
131 	}
132 
133 	/* create Tx, Rx and mcc queues */
134 	for_all_wq_queues(sc, wq, i) {
135 		rc = oce_wq_create(wq, sc->eq[i]);
136 		if (rc)
137 			goto error;
138 		wq->queue_index = i;
139 		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
140 	}
141 
142 	for_all_rq_queues(sc, rq, i) {
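		/* RQ 0 (the default queue) shares EQ 0 with WQ 0; each
		 * remaining RQ i is bound to EQ i-1. */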
143 		rc = oce_rq_create(rq, sc->if_id,
144 					sc->eq[(i == 0) ? 0:(i-1)]);
145 		if (rc)
146 			goto error;
147 		rq->queue_index = i;
148 	}
149 
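	/* The mailbox queue (MQ) gets 64 entries and is serviced by EQ 0. */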
150 	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
151 	if (!sc->mq)
152 		goto error;
153 
154 	return rc;
155 
156 error:
157 	oce_queue_release_all(sc);
158 	return 1;
159 }
160 
161 
162 
163 /**
164  * @brief Releases all the queues created (WQs, RQs, MQ and EQs)
165  * @param sc		software handle to the device
166  */
167 void
168 oce_queue_release_all(POCE_SOFTC sc)
169 {
170 	int i = 0;
171 	struct oce_wq *wq;
172 	struct oce_rq *rq;
173 	struct oce_eq *eq;
174 
175 	for_all_rq_queues(sc, rq, i) {
176 		if (rq) {
177 			oce_rq_del(sc->rq[i]);
178 			oce_rq_free(sc->rq[i]);
179 		}
180 	}
181 
182 	for_all_wq_queues(sc, wq, i) {
183 		if (wq) {
184 			oce_wq_del(sc->wq[i]);
185 			oce_wq_free(sc->wq[i]);
186 		}
187 	}
188 
189 	if (sc->mq)
190 		oce_mq_free(sc->mq);
191 
192 	for_all_evnt_queues(sc, eq, i) {
193 		if (eq)
194 			oce_eq_del(sc->eq[i]);
195 	}
196 }
197 
198 
199 
200 /**
201  * @brief 		Function to create a WQ for NIC Tx
202  * @param sc 		software handle to the device
203  * @param q_len		number of entries in the queue
204  * @param wq_type	work queue type
205  * @returns		the pointer to the WQ created or NULL on failure
206  */
207 static struct oce_wq *
208 oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
209 {
210 	struct oce_wq *wq;
211 	int rc = 0, i;
212 
213 	/* q_len must be min 256 and max 2k */
214 	if (q_len < 256 || q_len > 2048) {
215 		device_printf(sc->dev,
216 			  "Invalid q length. Must be "
217 			  "[256, 2048]: 0x%x\n", q_len);
218 		return NULL;
219 	}
220 
221 	/* allocate wq */
222 	wq = kmalloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
223 	if (!wq)
224 		return NULL;
225 
226 	/* Set the wq config */
227 	wq->cfg.q_len = q_len;
228 	wq->cfg.wq_type = (uint8_t) wq_type;
229 	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
230 	wq->cfg.nbufs = 2 * wq->cfg.q_len;
231 	wq->cfg.nhdl = 2 * wq->cfg.q_len;
232 
233 	wq->parent = (void *)sc;
234 
235 	rc = bus_dma_tag_create(NULL,
236 				1, 0,
237 				BUS_SPACE_MAXADDR,
238 				BUS_SPACE_MAXADDR,
239 				NULL, NULL,
240 				OCE_MAX_TX_SIZE,
241 				OCE_MAX_TX_ELEMENTS,
242 				PAGE_SIZE, 0, &wq->tag);
243 
244 	if (rc)
245 		goto free_wq;
246 
247 
248 	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
249 		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
250 		if (rc)
251 			goto free_wq;
252 	}
253 
254 	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
255 	if (!wq->ring)
256 		goto free_wq;
257 
258 
259 	LOCK_CREATE(&wq->tx_lock, "TX_lock");
260 
261 #if 0 /* XXX swildner: MULTIQUEUE */
262 	/* Allocate buf ring for multiqueue*/
263 	wq->br = buf_ring_alloc(4096, M_DEVBUF,
264 			M_WAITOK, &wq->tx_lock.mutex);
265 	if (!wq->br)
266 		goto free_wq;
267 #endif
268 	return wq;
269 
270 
271 free_wq:
272 	device_printf(sc->dev, "Create WQ failed\n");
273 	oce_wq_free(wq);
274 	return NULL;
275 }
276 
277 
278 
279 /**
280  * @brief 		Frees the work queue
281  * @param wq		pointer to work queue to free
282  */
283 static void
284 oce_wq_free(struct oce_wq *wq)
285 {
286 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
287 	int i;
288 
289 	taskqueue_drain(taskqueue_swi, &wq->txtask);
290 
291 	if (wq->ring != NULL) {
292 		oce_destroy_ring_buffer(sc, wq->ring);
293 		wq->ring = NULL;
294 	}
295 
296 	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
297 		if (wq->pckts[i].map != NULL) {
298 			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
299 			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
300 			wq->pckts[i].map = NULL;
301 		}
302 	}
303 
304 	if (wq->tag != NULL)
305 		bus_dma_tag_destroy(wq->tag);
306 #if 0 /* XXX swildner: MULTIQUEUE */
307 	if (wq->br != NULL)
308 		buf_ring_free(wq->br, M_DEVBUF);
309 #endif
310 
311 	LOCK_DESTROY(&wq->tx_lock);
312 	kfree(wq, M_DEVBUF);
313 }
314 
315 
316 
317 /**
318  * @brief 		Create a work queue
319  * @param wq		pointer to work queue
320  * @param eq		pointer to associated event queue
321  */
322 static int
323 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
324 {
325 	POCE_SOFTC sc = wq->parent;
326 	struct oce_cq *cq;
327 	int rc = 0;
328 
329 	/* create the CQ */
330 	cq = oce_cq_create(sc,
331 			   eq,
332 			   CQ_LEN_1024,
333 			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
334 	if (!cq)
335 		return ENXIO;
336 
337 
338 	wq->cq = cq;
339 
340 	rc = oce_mbox_create_wq(wq);
341 	if (rc)
342 		goto error;
343 
344 	wq->qstate = QCREATED;
345 	wq->wq_free = wq->cfg.q_len;
346 	wq->ring->cidx = 0;
347 	wq->ring->pidx = 0;
348 
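	/* Register the new CQ with its EQ and install the WQ completion handler. */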
349 	eq->cq[eq->cq_valid] = cq;
350 	eq->cq_valid++;
351 	cq->cb_arg = wq;
352 	cq->cq_handler = oce_wq_handler;
353 
354 	return 0;
355 
356 error:
357 	device_printf(sc->dev, "WQ create failed\n");
358 	oce_wq_del(wq);
359 	return rc;
360 }
361 
362 
363 
364 
365 /**
366  * @brief 		Delete a work queue
367  * @param wq		pointer to work queue
368  */
369 static void
370 oce_wq_del(struct oce_wq *wq)
371 {
372 	struct oce_mbx mbx;
373 	struct mbx_delete_nic_wq *fwcmd;
374 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
375 
376 	if (wq->qstate == QCREATED) {
377 		bzero(&mbx, sizeof(struct oce_mbx));
378 		/* now fill the command */
379 		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
380 		fwcmd->params.req.wq_id = wq->wq_id;
381 		(void)oce_destroy_q(sc, &mbx,
382 				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
383 		wq->qstate = QDELETED;
384 	}
385 
386 	if (wq->cq != NULL) {
387 		oce_cq_del(sc, wq->cq);
388 		wq->cq = NULL;
389 	}
390 }
391 
392 
393 
394 /**
395  * @brief 		function to allocate receive queue resources
396  * @param sc		software handle to the device
397  * @param q_len		length of receive queue
398  * @param frag_size	size of a receive queue fragment
399  * @param mtu		maximum transmission unit
400  * @param rss		is-rss-queue flag
401  * @returns		the pointer to the RQ created or NULL on failure
402  */
403 static struct oce_rq *
404 oce_rq_init(POCE_SOFTC sc,
405 	    uint32_t q_len,
406 	    uint32_t frag_size,
407 	    uint32_t mtu, uint32_t rss)
408 {
409 	struct oce_rq *rq;
410 	int rc = 0, i;
411 
412 	if (OCE_LOG2(frag_size) <= 0)
413 		return NULL;
414 
415 	if ((q_len == 0) || (q_len > 1024))
416 		return NULL;
417 
418 	/* allocate the rq */
419 	rq = kmalloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
420 	if (!rq)
421 		return NULL;
422 
423 
424 	rq->cfg.q_len = q_len;
425 	rq->cfg.frag_size = frag_size;
426 	rq->cfg.mtu = mtu;
427 	rq->cfg.eqd = 0;
428 #if 0 /* XXX swildner: LRO */
429 	rq->lro_pkts_queued = 0;
430 #endif
431 	rq->cfg.is_rss_queue = rss;
432 	rq->packets_in = 0;
433 	rq->packets_out = 0;
434 	rq->pending = 0;
435 
436 	rq->parent = (void *)sc;
437 
438 	rc = bus_dma_tag_create(NULL,
439 				1, 0,
440 				BUS_SPACE_MAXADDR,
441 				BUS_SPACE_MAXADDR,
442 				NULL, NULL,
443 				OCE_MAX_RX_SIZE,
444 				1, PAGE_SIZE, 0, &rq->tag);
445 
446 	if (rc)
447 		goto free_rq;
448 
449 	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
450 		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
451 		if (rc)
452 			goto free_rq;
453 	}
454 
455 	/* create the ring buffer */
456 	rq->ring = oce_create_ring_buffer(sc, q_len,
457 				 sizeof(struct oce_nic_rqe));
458 	if (!rq->ring)
459 		goto free_rq;
460 
461 	LOCK_CREATE(&rq->rx_lock, "RX_lock");
462 
463 	return rq;
464 
465 free_rq:
466 	device_printf(sc->dev, "Create RQ failed\n");
467 	oce_rq_free(rq);
468 	return NULL;
469 }
470 
471 
472 
473 
474 /**
475  * @brief 		Free a receive queue
476  * @param rq		pointer to receive queue
477  */
478 static void
479 oce_rq_free(struct oce_rq *rq)
480 {
481 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
482 	int i = 0;
483 
484 	if (rq->ring != NULL) {
485 		oce_destroy_ring_buffer(sc, rq->ring);
486 		rq->ring = NULL;
487 	}
488 	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
489 		if (rq->pckts[i].map != NULL) {
490 			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
491 			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
492 			rq->pckts[i].map = NULL;
493 		}
494 		if (rq->pckts[i].mbuf) {
495 			m_free(rq->pckts[i].mbuf);
496 			rq->pckts[i].mbuf = NULL;
497 		}
498 	}
499 
500 	if (rq->tag != NULL)
501 		bus_dma_tag_destroy(rq->tag);
502 
503 	LOCK_DESTROY(&rq->rx_lock);
504 	kfree(rq, M_DEVBUF);
505 }
506 
507 
508 
509 
510 /**
511  * @brief 		Create a receive queue
512  * @param rq 		receive queue
513  * @param if_id		interface identifier
514  * @param eq		pointer to event queue
515  */
516 static int
517 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
518 {
519 	POCE_SOFTC sc = rq->parent;
520 	struct oce_cq *cq;
521 
522 	cq = oce_cq_create(sc,
523 			   eq,
524 			   CQ_LEN_1024,
525 			   sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
526 	if (!cq)
527 		return ENXIO;
528 
529 	rq->cq = cq;
530 	rq->cfg.if_id = if_id;
531 
532 	/* Don't create RQ here. Create in if_activate */
533 	rq->qstate     = 0;
534 	rq->ring->cidx = 0;
535 	rq->ring->pidx = 0;
536 	eq->cq[eq->cq_valid] = cq;
537 	eq->cq_valid++;
538 	cq->cb_arg = rq;
539 	cq->cq_handler = oce_rq_handler;
540 
541 	return 0;
542 
543 }
544 
545 
546 
547 
548 /**
549  * @brief 		Delete a receive queue
550  * @param rq		receive queue
551  */
552 static void
553 oce_rq_del(struct oce_rq *rq)
554 {
555 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
556 	struct oce_mbx mbx;
557 	struct mbx_delete_nic_rq *fwcmd;
558 
559 	if (rq->qstate == QCREATED) {
560 		bzero(&mbx, sizeof(mbx));
561 
562 		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
563 		fwcmd->params.req.rq_id = rq->rq_id;
564 		(void)oce_destroy_q(sc, &mbx,
565 				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
566 		rq->qstate = QDELETED;
567 	}
568 
569 	if (rq->cq != NULL) {
570 		oce_cq_del(sc, rq->cq);
571 		rq->cq = NULL;
572 	}
573 }
574 
575 
576 
577 /**
578  * @brief		function to create an event queue
579  * @param sc		software handle to the device
580  * @param q_len		length of event queue
581  * @param item_size	size of an event queue item
582  * @param eq_delay	event queue delay
583  * @retval eq      	success, pointer to event queue
584  * @retval NULL		failure
585  */
586 static struct oce_eq *
587 oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
588 	      uint32_t item_size,
589 	      uint32_t eq_delay,
590 	      uint32_t vector)
591 {
592 	struct oce_eq *eq;
593 	int rc = 0;
594 
595 	/* allocate an eq */
596 	eq = kmalloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
597 	if (eq == NULL)
598 		return NULL;
599 
600 	eq->parent = (void *)sc;
601 	eq->eq_id = 0xffff;
602 	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
603 	if (!eq->ring)
604 		goto free_eq;
605 
606 	eq->eq_cfg.q_len = q_len;
607 	eq->eq_cfg.item_size = item_size;
608 	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
609 
610 	rc = oce_mbox_create_eq(eq);
611 	if (rc)
612 		goto free_eq;
613 
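	/* Record the EQ in the next free interrupt slot. */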
614 	sc->intrs[sc->neqs++].eq = eq;
615 
616 	return eq;
617 
618 free_eq:
619 	oce_eq_del(eq);
620 	return NULL;
621 }
622 
623 
624 
625 
626 /**
627  * @brief 		Function to delete an event queue
628  * @param eq		pointer to an event queue
629  */
630 static void
631 oce_eq_del(struct oce_eq *eq)
632 {
633 	struct oce_mbx mbx;
634 	struct mbx_destroy_common_eq *fwcmd;
635 	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
636 
637 	if (eq->eq_id != 0xffff) {
638 		bzero(&mbx, sizeof(mbx));
639 		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
640 		fwcmd->params.req.id = eq->eq_id;
641 		(void)oce_destroy_q(sc, &mbx,
642 			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
643 	}
644 
645 	if (eq->ring != NULL) {
646 		oce_destroy_ring_buffer(sc, eq->ring);
647 		eq->ring = NULL;
648 	}
649 
650 	kfree(eq, M_DEVBUF);
651 
652 }
653 
654 
655 
656 
657 /**
658  * @brief		Function to create an MQ
659  * @param sc		software handle to the device
660  * @param eq		the EQ to associate with the MQ for event notification
661  * @param q_len		the number of entries to create in the MQ
662  * @returns		pointer to the created MQ, or NULL on failure
663  */
664 static struct oce_mq *
665 oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
666 {
667 	struct oce_mbx mbx;
668 	struct mbx_create_common_mq_ex *fwcmd = NULL;
669 	struct oce_mq *mq = NULL;
670 	int rc = 0;
671 	struct oce_cq *cq;
672 	oce_mq_ext_ctx_t *ctx;
673 	uint32_t num_pages;
674 	uint32_t page_size;
675 	int version;
676 
677 	cq = oce_cq_create(sc, eq, CQ_LEN_256,
678 			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
679 	if (!cq)
680 		return NULL;
681 
682 	/* allocate the mq */
683 	mq = kmalloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
684 	if (!mq) {
685 		oce_cq_del(sc, cq);
686 		goto error;
687 	}
688 
689 	mq->parent = sc;
690 
691 	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
692 	if (!mq->ring)
693 		goto error;
694 
695 	bzero(&mbx, sizeof(struct oce_mbx));
696 
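	/* XE201 adapters use version 1 of the CREATE_MQ_EXT command. */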
697 	version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
698 	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
699 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
700 				MBX_SUBSYSTEM_COMMON,
701 				OPCODE_COMMON_CREATE_MQ_EXT,
702 				MBX_TIMEOUT_SEC,
703 				sizeof(struct mbx_create_common_mq_ex),
704 				version);
705 
706 	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
707 	page_size = mq->ring->num_items * mq->ring->item_size;
708 
709 	ctx = &fwcmd->params.req.context;
710 
711 	if (IS_XE201(sc)) {
712 		ctx->v1.num_pages = num_pages;
713 		ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
714 		ctx->v1.cq_id = cq->cq_id;
715 		ctx->v1.valid = 1;
716 		ctx->v1.async_cq_id = cq->cq_id;
717 		ctx->v1.async_cq_valid = 1;
718 		/* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
719 		ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
720 		ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
721 		ctx->v1.async_evt_bitmap |=
722 					LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
723 	}
724 	else {
725 		ctx->v0.num_pages = num_pages;
726 		ctx->v0.cq_id = cq->cq_id;
727 		ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
728 		ctx->v0.valid = 1;
729 		/* Subscribe to all async events (the v0 bitmap is fully set) */
730 		ctx->v0.async_evt_bitmap = 0xffffffff;
731 	}
732 
733 	mbx.u0.s.embedded = 1;
734 	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
735 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
736 
737 	rc = oce_mbox_post(sc, &mbx, NULL);
738 	if (!rc)
739 		rc = fwcmd->hdr.u0.rsp.status;
740 	if (rc) {
741 		device_printf(sc->dev, "%s failed - cmd status: %d\n",
742 			      __FUNCTION__, rc);
743 		goto error;
744 	}
745 	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
746 	mq->cq = cq;
747 	eq->cq[eq->cq_valid] = cq;
748 	eq->cq_valid++;
749 	mq->cq->eq = eq;
750 	mq->cfg.q_len = (uint8_t) q_len;
751 	mq->cfg.eqd = 0;
752 	mq->qstate = QCREATED;
753 
754 	mq->cq->cb_arg = mq;
755 	mq->cq->cq_handler = oce_mq_handler;
756 
757 	return mq;
758 
759 error:
760 	device_printf(sc->dev, "MQ create failed\n");
761 	oce_mq_free(mq);
762 	mq = NULL;
763 	return mq;
764 }
765 
766 
767 
768 
769 
770 /**
771  * @brief		Function to free a mailbox queue
772  * @param mq		pointer to a mailbox queue
773  */
774 static void
775 oce_mq_free(struct oce_mq *mq)
776 {
777 	POCE_SOFTC sc;
778 	struct oce_mbx mbx;
779 	struct mbx_destroy_common_mq *fwcmd;
780 
781 	if (!mq)
782 		return;
783 	sc = (POCE_SOFTC) mq->parent;
784 	if (mq->ring != NULL) {
785 		oce_destroy_ring_buffer(sc, mq->ring);
786 		mq->ring = NULL;
787 		if (mq->qstate == QCREATED) {
788 			bzero(&mbx, sizeof (struct oce_mbx));
789 			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
790 			fwcmd->params.req.id = mq->mq_id;
791 			(void) oce_destroy_q(sc, &mbx,
792 				sizeof (struct mbx_destroy_common_mq),
793 				QTYPE_MQ);
794 		}
795 		mq->qstate = QDELETED;
796 	}
797 
798 	if (mq->cq != NULL) {
799 		oce_cq_del(sc, mq->cq);
800 		mq->cq = NULL;
801 	}
802 
803 	kfree(mq, M_DEVBUF);
804 	mq = NULL;
805 }
806 
807 
808 
809 /**
810  * @brief		Function to delete an EQ, CQ, MQ, WQ or RQ
811  * @param sc		software handle to the device
812  * @param mbx		mailbox command to send to the fw to delete the queue
813  *			(mbx contains the queue information to delete)
814  * @param req_size	the size of the mbx payload dependent on the qtype
815  * @param qtype		the type of queue i.e. EQ, CQ, MQ, WQ or RQ
816  * @returns 		0 on success, failure otherwise
817  */
818 static int
819 oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
820 		enum qtype qtype)
821 {
822 	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
823 	int opcode;
824 	int subsys;
825 	int rc = 0;
826 
827 	switch (qtype) {
828 	case QTYPE_EQ:
829 		opcode = OPCODE_COMMON_DESTROY_EQ;
830 		subsys = MBX_SUBSYSTEM_COMMON;
831 		break;
832 	case QTYPE_CQ:
833 		opcode = OPCODE_COMMON_DESTROY_CQ;
834 		subsys = MBX_SUBSYSTEM_COMMON;
835 		break;
836 	case QTYPE_MQ:
837 		opcode = OPCODE_COMMON_DESTROY_MQ;
838 		subsys = MBX_SUBSYSTEM_COMMON;
839 		break;
840 	case QTYPE_WQ:
841 		opcode = NIC_DELETE_WQ;
842 		subsys = MBX_SUBSYSTEM_NIC;
843 		break;
844 	case QTYPE_RQ:
845 		opcode = NIC_DELETE_RQ;
846 		subsys = MBX_SUBSYSTEM_NIC;
847 		break;
848 	default:
849 		return EINVAL;
850 	}
851 
852 	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
853 				opcode, MBX_TIMEOUT_SEC, req_size,
854 				OCE_MBX_VER_V0);
855 
856 	mbx->u0.s.embedded = 1;
857 	mbx->payload_length = (uint32_t) req_size;
858 	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
859 
860 	rc = oce_mbox_post(sc, mbx, NULL);
861 	if (!rc)
862 		rc = hdr->u0.rsp.status;
863 	if (rc)
864 		device_printf(sc->dev, "%s failed - cmd status: %d\n",
865 			      __FUNCTION__, rc);
866 	return rc;
867 }
868 
869 
870 
871 /**
872  * @brief		Function to create a completion queue
873  * @param sc		software handle to the device
874  * @param eq		optional eq to be associated with the cq
875  * @param q_len		length of completion queue
876  * @param item_size	size of completion queue items
877  * @param sol_event	command context event
878  * @param is_eventable	set if the CQ should post events to its EQ
879  * @param nodelay	no delay flag
880  * @param ncoalesce	coalescence setting for the CQ
881  * @returns 		pointer to the cq created, NULL on failure
882  */
883 struct oce_cq *
884 oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
885 			     uint32_t q_len,
886 			     uint32_t item_size,
887 			     uint32_t sol_event,
888 			     uint32_t is_eventable,
889 			     uint32_t nodelay, uint32_t ncoalesce)
890 {
891 	struct oce_cq *cq = NULL;
892 	int rc = 0;
893 
894 	cq = kmalloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
895 	if (!cq)
896 		return NULL;
897 
898 	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
899 	if (!cq->ring)
900 		goto error;
901 
902 	cq->parent = sc;
903 	cq->eq = eq;
904 	cq->cq_cfg.q_len = q_len;
905 	cq->cq_cfg.item_size = item_size;
906 	cq->cq_cfg.nodelay = (uint8_t) nodelay;
907 
908 	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
909 	if (rc)
910 		goto error;
911 
912 	sc->cq[sc->ncqs++] = cq;
913 
914 	return cq;
915 
916 error:
917 	device_printf(sc->dev, "CQ create failed\n");
918 	oce_cq_del(sc, cq);
919 	return NULL;
920 }
921 
922 
923 
924 /**
925  * @brief		Deletes the completion queue
926  * @param sc		software handle to the device
927  * @param cq		pointer to a completion queue
928  */
929 static void
930 oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
931 {
932 	struct oce_mbx mbx;
933 	struct mbx_destroy_common_cq *fwcmd;
934 
935 	if (cq->ring != NULL) {
936 
937 		bzero(&mbx, sizeof(struct oce_mbx));
938 		/* now fill the command */
939 		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
940 		fwcmd->params.req.id = cq->cq_id;
941 		(void)oce_destroy_q(sc, &mbx,
942 			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
943 		/*NOW destroy the ring */
944 		oce_destroy_ring_buffer(sc, cq->ring);
945 		cq->ring = NULL;
946 	}
947 
948 	kfree(cq, M_DEVBUF);
949 	cq = NULL;
950 }
951 
952 
953 
954 /**
955  * @brief		Start a receive queue
956  * @param rq		pointer to a receive queue
957  */
958 int
959 oce_start_rq(struct oce_rq *rq)
960 {
961 	int rc;
962 
963 	rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
964 
965 	if (rc == 0)
966 		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
967 	return rc;
968 }
969 
970 
971 
972 /**
973  * @brief		Start a work queue
974  * @param wq		pointer to a work queue
975  */
976 int
977 oce_start_wq(struct oce_wq *wq)
978 {
979 	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
980 	return 0;
981 }
982 
983 
984 
985 /**
986  * @brief		Start a mailbox queue
987  * @param mq		pointer to a mailbox queue
988  */
989 int
990 oce_start_mq(struct oce_mq *mq)
991 {
992 	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
993 	return 0;
994 }
995 
996 
997 
998 /**
999  * @brief		Function to arm an EQ so that it can generate events
1000  * @param sc		software handle to the device
1001  * @param qid		id of the EQ returned by the fw at the time of creation
1002  * @param npopped	number of EQEs popped (to be acknowledged)
1003  * @param rearm		rearm bit enable/disable
1004  * @param clearint	bit to clear the interrupt condition because of which
1005  *			EQEs are generated
1006  */
1007 void
1008 oce_arm_eq(POCE_SOFTC sc,
1009 	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
1010 {
1011 	eq_db_t eq_db = { 0 };
1012 
1013 	eq_db.bits.rearm = rearm;
1014 	eq_db.bits.event = 1;
1015 	eq_db.bits.num_popped = npopped;
1016 	eq_db.bits.clrint = clearint;
1017 	eq_db.bits.qid = qid;
1018 	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
1019 
1020 }
1021 
1022 
1023 
1024 
1025 /**
1026  * @brief		Function to arm a CQ with CQEs
1027  * @param sc		software handle to the device
1028  * @param qid		id of the CQ returned by the fw at the time of creation
1029  * @param npopped	number of CQEs popped (to be acknowledged)
1030  * @param rearm		rearm bit enable/disable
1031  */
1032 void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
1033 {
1034 	cq_db_t cq_db = { 0 };
1035 
1036 	cq_db.bits.rearm = rearm;
1037 	cq_db.bits.num_popped = npopped;
1038 	cq_db.bits.event = 0;
1039 	cq_db.bits.qid = qid;
1040 	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
1041 
1042 }
1043 
1044 
1045 
1046 
1047 /*
1048  * @brief		function to clean up pending entries on an EQ during stop
1049  * @param eq		pointer to event queue structure
1050  * @returns		nothing; consumed EQEs are acknowledged to the hardware
1051  */
1052 void
1053 oce_drain_eq(struct oce_eq *eq)
1054 {
1055 
1056 	struct oce_eqe *eqe;
1057 	uint16_t num_eqe = 0;
1058 	POCE_SOFTC sc = eq->parent;
1059 
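	/* Consume valid EQEs until an unposted (zero) entry is found, then
	 * acknowledge the popped entries and clear the interrupt condition. */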
1060 	do {
1061 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1062 		if (eqe->evnt == 0)
1063 			break;
1064 		eqe->evnt = 0;
1065 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
1066 					BUS_DMASYNC_POSTWRITE);
1067 		num_eqe++;
1068 		RING_GET(eq->ring, 1);
1069 
1070 	} while (TRUE);
1071 
1072 	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
1073 
1074 }
1075 
1076 
1077 
1078 void
1079 oce_drain_wq_cq(struct oce_wq *wq)
1080 {
1081 	POCE_SOFTC sc = wq->parent;
1082 	struct oce_cq *cq = wq->cq;
1083 	struct oce_nic_tx_cqe *cqe;
1084 	int num_cqes = 0;
1085 
1086 	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1087 				 BUS_DMASYNC_POSTWRITE);
1088 
1089 	do {
1090 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1091 		if (cqe->u0.dw[3] == 0)
1092 			break;
1093 		cqe->u0.dw[3] = 0;
1094 		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1095 				 BUS_DMASYNC_POSTWRITE);
1096 		RING_GET(cq->ring, 1);
1097 		num_cqes++;
1098 
1099 	} while (TRUE);
1100 
1101 	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1102 
1103 }
1104 
1105 
1106 /*
1107  * @brief		function to drain the MQ completion queue and process its CQEs
1108  * @param arg		pointer to the MQ whose CQ is to be drained
1109  *			(the body of this handler is not yet implemented)
1110  * @returns		nothing
1111  */
1112 void
1113 oce_drain_mq_cq(void *arg)
1114 {
1115 	/* TODO: additional code. */
1116 	return;
1117 }
1118 
1119 
1120 
1121 /**
1122  * @brief		function to drain a receive queue's completion queue
1123  * @param rq		pointer to the RQ to drain
1124  * @return		nothing
1125  */
1126 void
1127 oce_drain_rq_cq(struct oce_rq *rq)
1128 {
1129 	struct oce_nic_rx_cqe *cqe;
1130 	uint16_t num_cqe = 0;
1131 	struct oce_cq  *cq;
1132 	POCE_SOFTC sc;
1133 
1134 	sc = rq->parent;
1135 	cq = rq->cq;
1136 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1137 	/* dequeue till you reach an invalid cqe */
1138 	while (RQ_CQE_VALID(cqe)) {
1139 		RQ_CQE_INVALIDATE(cqe);
1140 		RING_GET(cq->ring, 1);
1141 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
1142 		    struct oce_nic_rx_cqe);
1143 		num_cqe++;
1144 	}
1145 	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
1146 
1147 	return;
1148 }
1149 
1150 
1151 void
1152 oce_free_posted_rxbuf(struct oce_rq *rq)
1153 {
1154 	struct oce_packet_desc *pd;
1155 
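	/* Walk the posted buffers starting at packets_out, unmapping and
	 * freeing any mbuf the hardware has not returned. */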
1156 	while (rq->pending) {
1157 
1158 		pd = &rq->pckts[rq->packets_out];
1159 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1160 		bus_dmamap_unload(rq->tag, pd->map);
1161 		if (pd->mbuf != NULL) {
1162 			m_freem(pd->mbuf);
1163 			pd->mbuf = NULL;
1164 		}
1165 
1166 		if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
1167 			rq->packets_out = 0;
1168 		else
1169 			rq->packets_out++;
1170 
1171 		rq->pending--;
1172 	}
1173 
1174 }
1175 
1176 void
1177 oce_stop_rx(POCE_SOFTC sc)
1178 {
1179 	struct oce_mbx mbx;
1180 	struct mbx_delete_nic_rq *fwcmd;
1181 	struct oce_rq *rq;
1182 	int i = 0;
1183 
1184 	for_all_rq_queues(sc, rq, i) {
1185 		if (rq->qstate == QCREATED) {
1186 			/* Delete rxq in firmware */
1187 
1188 			bzero(&mbx, sizeof(mbx));
1189 			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1190 			fwcmd->params.req.rq_id = rq->rq_id;
1191 
1192 			(void)oce_destroy_q(sc, &mbx,
1193 				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
1194 
1195 			rq->qstate = QDELETED;
1196 
1197 			DELAY(1);
1198 
1199 			/* Free posted RX buffers that are not used */
1200 			oce_free_posted_rxbuf(rq);
1201 
1202 		}
1203 	}
1204 }
1205 
1206 
1207 
1208 int
1209 oce_start_rx(POCE_SOFTC sc)
1210 {
1211 	struct oce_rq *rq;
1212 	int rc = 0, i;
1213 
1214 	for_all_rq_queues(sc, rq, i) {
1215 		if (rq->qstate == QCREATED)
1216 			continue;
1217 		rc = oce_mbox_create_rq(rq);
1218 		if (rc)
1219 			goto error;
1220 		/* reset queue pointers */
1221 		rq->qstate 	 = QCREATED;
1222 		rq->pending	 = 0;
1223 		rq->ring->cidx	 = 0;
1224 		rq->ring->pidx	 = 0;
1225 		rq->packets_in	 = 0;
1226 		rq->packets_out	 = 0;
1227 	}
1228 
1229 	DELAY(1);
1230 
1231 	/* RSS config */
1232 	if (is_rss_enabled(sc)) {
1233 		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1234 		if (rc)
1235 			goto error;
1236 
1237 	}
1238 
1239 	return rc;
1240 error:
1241 	device_printf(sc->dev, "Start RX failed\n");
1242 	return rc;
1243 
1244 }
1245