1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Source file containing Queue handling functions
29  *
30  */
31 
32 #include <oce_impl.h>
33 
int oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype);
36 struct oce_cq *oce_cq_create(struct oce_dev *dev, struct oce_eq *eq,
37     uint32_t q_len, uint32_t entry_size, boolean_t sol_event,
38     boolean_t is_eventable, boolean_t nodelay, uint32_t ncoalesce);
39 
40 int oce_cq_del(struct oce_dev *dev, struct oce_cq *cq);
41 
42 /*
43  * function to create an event queue
44  *
45  * dev - software handle to the device
46  * eqcfg - pointer to a config structure containg the eq parameters
47  *
48  * return pointer to EQ; NULL on failure
49  */
50 struct oce_eq *
51 oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
52     uint32_t eq_delay)
53 {
54 	struct oce_eq *eq;
55 	struct oce_mbx mbx;
56 	struct mbx_create_common_eq *fwcmd;
57 	int ret = 0;
58 
59 	bzero(&mbx, sizeof (struct oce_mbx));
60 
	/* point the fw command at the mbx payload */
62 	fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
63 
64 	/* allocate an eq */
65 	eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);
66 	if (eq == NULL) {
67 		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
68 		    "EQ allocation failed");
69 		return (NULL);
70 	}
71 
72 	eq->ring = create_ring_buffer(dev, q_len,
73 	    item_size, DDI_DMA_CONSISTENT);
74 	if (eq->ring == NULL) {
75 		oce_log(dev, CE_WARN, MOD_CONFIG,
76 		    "EQ	ring alloc failed:0x%p",
77 		    (void *)eq->ring);
78 		kmem_free(eq, sizeof (struct oce_eq));
79 		return (NULL);
80 	}
81 
	/* now fill the fw command in the mbx payload */
83 	bzero(fwcmd, sizeof (struct mbx_create_common_eq));
84 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
85 	    MBX_SUBSYSTEM_COMMON,
86 	    OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
87 	    sizeof (struct mbx_create_common_eq));
88 
89 	fwcmd->params.req.num_pages = eq->ring->dbuf->num_pages;
90 	oce_page_list(eq->ring->dbuf, &fwcmd->params.req.pages[0],
91 	    eq->ring->dbuf->num_pages);
92 
93 	/* dw 0 */
94 	fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
95 	fwcmd->params.req.eq_ctx.valid = 1;
96 	/* dw 1 */
97 	fwcmd->params.req.eq_ctx.armed = 0;
98 	fwcmd->params.req.eq_ctx.pd = 0;
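	/* count encodes the ring length as log2(q_len / 256) */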
99 	fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
100 
101 	/* dw 2 */
102 	fwcmd->params.req.eq_ctx.function = dev->fn;
103 	fwcmd->params.req.eq_ctx.nodelay  = 0;
104 	fwcmd->params.req.eq_ctx.phase = 0;
105 	/* todo: calculate multiplier from max min and cur */
106 	fwcmd->params.req.eq_ctx.delay_mult = eq_delay;
107 
108 	/* fill rest of mbx */
109 	mbx.u0.s.embedded = 1;
110 	mbx.payload_length = sizeof (struct mbx_create_common_eq);
111 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
112 
113 	/* now post the command */
114 	ret = oce_mbox_post(dev, &mbx, NULL);
115 
116 	if (ret != 0) {
117 		oce_log(dev, CE_WARN, MOD_CONFIG,
118 		    "EQ create failed: %d", ret);
119 		destroy_ring_buffer(dev, eq->ring);
120 		kmem_free(eq, sizeof (struct oce_eq));
121 		return (NULL);
122 	}
123 
124 	/* interpret the response */
125 	eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
126 	eq->eq_cfg.q_len = q_len;
127 	eq->eq_cfg.item_size = item_size;
128 	eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
129 	eq->parent = (void *)dev;
130 	atomic_inc_32(&dev->neqs);
131 	mutex_init(&eq->lock, NULL, MUTEX_DRIVER,
132 	    DDI_INTR_PRI(dev->intr_pri));
133 	oce_log(dev, CE_NOTE, MOD_CONFIG,
134 	    "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
135 	return (eq);
136 } /* oce_eq_create */
137 
138 /*
139  * function to delete an event queue
140  *
141  * dev - software handle to the device
142  * eq - handle to the eq to be deleted
143  *
 * return 0 => success, failure otherwise
145  */
146 int
147 oce_eq_del(struct oce_dev *dev, struct oce_eq *eq)
148 {
149 	/* destroy the ring */
150 	destroy_ring_buffer(dev, eq->ring);
151 	eq->ring = NULL;
152 
153 	mutex_destroy(&eq->lock);
154 	/* release the eq */
155 	kmem_free(eq, sizeof (struct oce_eq));
156 	atomic_dec_32(&dev->neqs);
157 
158 	return (DDI_SUCCESS);
159 } /* oce_eq_del */
160 
161 /*
162  * function to create a completion queue
163  *
164  * dev - software handle to the device
165  * eq - optional eq to be associated with to the cq
166  * cqcfg - configuration for this queue
167  *
168  * return pointer to the cq created. NULL on failure
169  */
170 struct oce_cq *
171 oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
172     uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
173     boolean_t nodelay, uint32_t ncoalesce)
174 {
175 	struct oce_cq *cq = NULL;
176 	struct oce_mbx mbx;
177 	struct mbx_create_common_cq *fwcmd;
178 	int ret = 0;
179 
180 	bzero(&mbx, sizeof (struct oce_mbx));
181 	/* create cq */
182 	cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
183 	if (cq == NULL) {
184 		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
185 		    "CQ allocation failed");
186 		return (NULL);
187 	}
188 
189 	/* create the ring buffer for this queue */
190 	cq->ring = create_ring_buffer(dev, q_len,
191 	    item_size, DDI_DMA_CONSISTENT);
192 	if (cq->ring == NULL) {
193 		oce_log(dev, CE_WARN, MOD_CONFIG,
194 		    "CQ ring alloc failed:0x%p",
195 		    (void *)cq->ring);
196 		kmem_free(cq, sizeof (struct oce_cq));
197 		return (NULL);
198 	}
199 
	/* point the fw command at the mbx payload */
201 	fwcmd = (struct mbx_create_common_cq *)&mbx.payload;
202 
203 	/* fill the command header */
204 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
205 	    MBX_SUBSYSTEM_COMMON,
206 	    OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
207 	    sizeof (struct mbx_create_common_cq));
208 
209 	/* fill command context */
210 	/* dw0 */
211 	fwcmd->params.req.cq_ctx.eventable = is_eventable;
212 	fwcmd->params.req.cq_ctx.sol_event = sol_event;
213 	fwcmd->params.req.cq_ctx.valid = 1;
214 	fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
215 	fwcmd->params.req.cq_ctx.nodelay = nodelay;
216 	fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;
217 
218 	/* dw1 */
219 	fwcmd->params.req.cq_ctx.armed = 1;
220 	fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
221 	fwcmd->params.req.cq_ctx.pd = 0;
222 
223 	/* dw2 */
224 	fwcmd->params.req.cq_ctx.function = dev->fn;
225 
226 	/* fill the rest of the command */
227 	fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages;
228 	oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0],
229 	    cq->ring->dbuf->num_pages);
230 
231 	/* fill rest of mbx */
232 	mbx.u0.s.embedded = 1;
233 	mbx.payload_length = sizeof (struct mbx_create_common_cq);
234 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
235 
236 	/* now send the mail box */
237 	ret = oce_mbox_post(dev, &mbx, NULL);
238 
239 	if (ret != 0) {
240 		oce_log(dev, CE_WARN, MOD_CONFIG,
241 		    "CQ create failed: 0x%x", ret);
242 		destroy_ring_buffer(dev, cq->ring);
243 		kmem_free(cq, sizeof (struct oce_cq));
244 		return (NULL);
245 	}
246 
247 	cq->parent = dev;
	cq->eq = eq; /* pointer to the associated EQ */
249 	cq->cq_cfg.q_len = q_len;
250 	cq->cq_cfg.item_size = item_size;
251 	cq->cq_cfg.sol_eventable = (uint8_t)sol_event;
252 	cq->cq_cfg.nodelay = (uint8_t)nodelay;
253 	/* interpret the response */
254 	cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
255 	dev->cq[cq->cq_id] = cq;
256 	atomic_inc_32(&eq->ref_count);
257 	mutex_init(&cq->lock, NULL, MUTEX_DRIVER,
258 	    DDI_INTR_PRI(dev->intr_pri));
259 
260 	return (cq);
261 } /* oce_cq_create */
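/*
 * Usage sketch (illustrative only): the queue create routines in this
 * file pair each new CQ with a drain handler and callback argument,
 * as oce_mq_create() does below:
 *
 *	cq = oce_cq_create(dev, eq, CQ_LEN_256, sizeof (struct oce_mq_cqe),
 *	    B_FALSE, B_TRUE, B_TRUE, 0);
 *	if (cq == NULL)
 *		return (NULL);
 *	cq->cq_handler = oce_drain_mq_cq;
 *	cq->cb_arg = (void *)mq;
 */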
262 
263 /*
264  * function to delete a completion queue
265  *
266  * dev - software handle to the device
267  * cq - handle to the CQ to delete
268  *
269  * return 0 => success, failure otherwise
270  */
271 int
272 oce_cq_del(struct oce_dev *dev, struct oce_cq *cq)
273 {
274 	/* Reset the handler */
275 	cq->cq_handler = NULL;
276 	/* destroy the ring */
277 	destroy_ring_buffer(dev, cq->ring);
278 	cq->ring = NULL;
279 
280 	/* decrement eq ref count */
281 	atomic_dec_32(&cq->eq->ref_count);
282 	mutex_destroy(&cq->lock);
283 
	/* release the cq */
285 	kmem_free(cq, sizeof (struct oce_cq));
286 
287 	return (0);
288 } /* oce_cq_del */
289 
290 /*
291  * function to create an MQ
292  *
293  * dev - software handle to the device
294  * eq - the EQ to associate with the MQ for event notification
295  * q_len - the number of entries to create in the MQ
296  *
297  * return pointer to the created MQ, failure otherwise
298  */
299 struct oce_mq *
300 oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
301 {
302 	struct oce_mbx mbx;
303 	struct mbx_create_common_mq *fwcmd;
304 	struct oce_mq *mq = NULL;
305 	int ret = 0;
306 	struct oce_cq  *cq;
307 
308 	bzero(&mbx, sizeof (struct oce_mbx));
309 
	/* point the fw command at the mbx payload */
311 	fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
312 
313 	/* Create the Completion Q */
314 
315 	cq = oce_cq_create(dev, eq, CQ_LEN_256,
316 	    sizeof (struct oce_mq_cqe),
317 	    B_FALSE, B_TRUE, B_TRUE, 0);
318 	if (cq == NULL) {
319 		return (NULL);
320 	}
	/* allocate the mq */
	mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
	if (mq == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
		    "MQ allocation failed");
		goto mq_alloc_fail;
	}
327 
328 	/* create the ring buffer for this queue */
329 	mq->ring = create_ring_buffer(dev, q_len,
330 	    sizeof (struct oce_mbx), DDI_DMA_CONSISTENT);
331 	if (mq->ring == NULL) {
332 		oce_log(dev, CE_WARN, MOD_CONFIG,
333 		    "MQ ring alloc failed:0x%p",
334 		    (void *)mq->ring);
335 		goto mq_ring_alloc;
336 	}
337 
338 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
339 	    MBX_SUBSYSTEM_COMMON,
340 	    OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
341 	    sizeof (struct mbx_create_common_mq));
342 
343 	fwcmd->params.req.num_pages = mq->ring->dbuf->num_pages;
344 	oce_page_list(mq->ring->dbuf, fwcmd->params.req.pages,
345 	    mq->ring->dbuf->num_pages);
346 	fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
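	/* ring size is encoded as log2(q_len) + 1 */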
347 	fwcmd->params.req.context.u0.s.ring_size =
348 	    OCE_LOG2(q_len) + 1;
349 	fwcmd->params.req.context.u0.s.valid = 1;
350 	fwcmd->params.req.context.u0.s.fid = dev->fn;
351 
352 	/* fill rest of mbx */
353 	mbx.u0.s.embedded = 1;
354 	mbx.payload_length = sizeof (struct mbx_create_common_mq);
355 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
356 
357 	/* now send the mail box */
358 	ret = oce_mbox_post(dev, &mbx, NULL);
359 	if (ret != DDI_SUCCESS) {
360 		oce_log(dev, CE_WARN, MOD_CONFIG,
361 		    "MQ create failed: 0x%x", ret);
362 		goto mq_fail;
363 	}
364 
365 	/* interpret the response */
366 	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
367 	mq->cq = cq;
368 	mq->cfg.q_len = (uint8_t)q_len;
369 	mq->cfg.eqd = 0;
370 
371 	/* fill rest of the mq */
372 	mq->parent = dev;
373 
374 	/* set the MQCQ handlers */
375 	cq->cq_handler = oce_drain_mq_cq;
376 	cq->cb_arg = (void *)mq;
377 	return (mq);
378 
mq_fail:
	destroy_ring_buffer(dev, mq->ring);
mq_ring_alloc:
	kmem_free(mq, sizeof (struct oce_mq));
mq_alloc_fail:
	(void) oce_cq_del(dev, cq);
	return (NULL);
385 } /* oce_mq_create */
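/*
 * Usage sketch (illustrative only): device bring-up would create a
 * single MQ on one of the EQs; the dev->mq field, the eq argument and
 * the 128-entry length are assumptions about the caller.
 *
 *	dev->mq = oce_mq_create(dev, eq, 128);
 *	if (dev->mq == NULL)
 *		return (DDI_FAILURE);
 */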
386 
387 /*
388  * function to delete an MQ
389  *
390  * dev - software handle to the device
391  * mq - pointer to the MQ to delete
392  *
393  * return 0 => success, failure otherwise
394  */
395 int
396 oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
397 {
398 	int ret = 0;
399 
400 	/* destroy the ring */
401 	destroy_ring_buffer(dev, mq->ring);
402 	mq->ring = NULL;
403 
404 	/* destroy the CQ */
405 	ret = oce_cq_del(dev, mq->cq);
406 	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "MQCQ destroy failed");
409 	}
410 
	/* release the mq */
412 	kmem_free(mq, sizeof (struct oce_mq));
413 	return (DDI_SUCCESS);
414 } /* oce_mq_del */
415 
416 /*
417  * function to create a WQ for NIC Tx
418  *
419  * dev - software handle to the device
420  * wqcfg - configuration structure providing WQ config parameters
421  *
422  * return pointer to the WQ created. NULL on failure
423  */
424 struct oce_wq *
425 oce_wq_create(struct oce_dev *dev, struct oce_eq *eq,
426     uint32_t q_len, int wq_type)
427 {
428 	struct oce_mbx mbx;
429 	struct mbx_create_nic_wq *fwcmd;
430 	struct oce_wq *wq;
431 	struct oce_cq *cq;
432 	char str[MAX_POOL_NAME];
433 	int ret;
434 
435 	ASSERT(dev != NULL);
436 
437 	bzero(&mbx, sizeof (struct oce_mbx));
438 
	/* q_len must be between 256 and 2048 */
	if (q_len < 256 || q_len > 2048) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Invalid q length. Must be "
		    "[256, 2048]: 0x%x", q_len);
444 		return (NULL);
445 	}
446 
447 	/* allocate wq */
	wq = kmem_zalloc(sizeof (struct oce_wq), KM_NOSLEEP);
	if (wq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ allocation failed");
		return (NULL);
	}
453 
454 	/* Set the wq config */
455 	wq->cfg.q_len = q_len;
456 	wq->cfg.wq_type = (uint8_t)wq_type;
457 	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
458 	wq->cfg.nbufs = 2 * wq->cfg.q_len;
459 	wq->cfg.nhdl = 2 * wq->cfg.q_len;
460 
461 	/* assign parent */
462 	wq->parent = (void *)dev;
463 
464 	/* Create the WQ Buffer pool */
465 	ret  = oce_wqb_cache_create(wq, dev->bcopy_limit);
466 	if (ret != DDI_SUCCESS) {
467 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
468 		    "WQ Buffer Pool create failed ");
469 		goto wqb_fail;
470 	}
471 
472 	/* Create a pool of memory handles */
473 	ret = oce_wqm_cache_create(wq);
474 	if (ret != DDI_SUCCESS) {
475 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
476 		    "WQ MAP Handles Pool create failed ");
477 		goto wqm_fail;
478 	}
479 
480 	(void) snprintf(str, MAX_POOL_NAME, "%s%d", "oce_wqed_", dev->dev_id);
481 	wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
482 	    0, oce_wqe_desc_ctor,
483 	    oce_wqe_desc_dtor, NULL, NULL, NULL, 0);
484 	if (wq->wqed_cache == NULL) {
485 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
486 		    "WQ Packet Desc Pool create failed ");
487 		goto wqed_fail;
488 	}
489 
490 	/* create the CQ */
491 	cq = oce_cq_create(dev, eq, CQ_LEN_1024,
492 	    sizeof (struct oce_nic_tx_cqe),
493 	    B_FALSE, B_TRUE, B_FALSE, 3);
494 	if (cq == NULL) {
495 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
496 		    "WCCQ create failed ");
497 		goto wccq_fail;
498 	}
499 
500 	/* create the ring buffer */
501 	wq->ring = create_ring_buffer(dev, q_len,
502 	    NIC_WQE_SIZE, DDI_DMA_CONSISTENT);
503 	if (wq->ring == NULL) {
504 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
505 		    "Failed to create WQ ring ");
506 		goto wq_ringfail;
507 	}
508 
509 	/* now fill the command */
510 	fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
511 	bzero(fwcmd, sizeof (struct mbx_create_nic_wq));
512 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
513 	    MBX_SUBSYSTEM_NIC,
514 	    OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
515 	    sizeof (struct mbx_create_nic_wq));
516 
517 	fwcmd->params.req.nic_wq_type = (uint8_t)wq_type;
518 	fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = %u size = %lu",
520 	    (uint32_t)wq->ring->dbuf->num_pages,
521 	    wq->ring->dbuf->size);
522 
523 	/* workaround: fill 0x01 for ulp_mask in rsvd0 */
524 	fwcmd->params.req.rsvd0 = 0x01;
525 	fwcmd->params.req.wq_size = OCE_LOG2(q_len) + 1;
526 	fwcmd->params.req.valid = 1;
527 	fwcmd->params.req.pd_id = 0;
528 	fwcmd->params.req.pci_function_id = dev->fn;
529 	fwcmd->params.req.cq_id = cq->cq_id;
530 
531 	oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
532 	    wq->ring->dbuf->num_pages);
533 
534 	/* fill rest of mbx */
535 	mbx.u0.s.embedded = 1;
536 	mbx.payload_length = sizeof (struct mbx_create_nic_wq);
537 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
538 
539 	/* now post the command */
540 	ret = oce_mbox_post(dev, &mbx, NULL);
541 	if (ret != DDI_SUCCESS) {
542 		oce_log(dev, CE_WARN, MOD_CONFIG,
543 		    "WQ create failed: %d", ret);
544 		goto wq_fail;
545 
546 	}
547 
548 	/* interpret the response */
549 	wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
550 
551 	/* All are free to start with */
552 	wq->wq_free = q_len;
553 	wq->cq = cq;
554 
555 	/* set the WQCQ handlers */
556 	cq->cq_handler = oce_drain_wq_cq;
557 	cq->cb_arg = (void *)wq;
558 
559 	/* set the default eq delay for the eq associated with this wq */
560 	(void) oce_set_eq_delay(dev, &eq->eq_id, 1, wq->cfg.eqd);
561 
562 	/* Initialize WQ lock */
563 	mutex_init(&wq->lock, NULL, MUTEX_DRIVER,
564 	    DDI_INTR_PRI(dev->intr_pri));
565 	atomic_inc_32(&dev->nwqs);
566 
567 	OCE_LIST_CREATE(&wq->wqe_desc_list, DDI_INTR_PRI(dev->intr_pri));
568 
569 	return (wq);
570 
571 wq_fail:
572 	destroy_ring_buffer(dev, wq->ring);
573 wq_ringfail:
574 	(void) oce_cq_del(dev, cq);
575 wccq_fail:
576 	kmem_cache_destroy(wq->wqed_cache);
577 wqed_fail:
578 	oce_wqm_cache_destroy(wq);
579 wqm_fail:
580 	oce_wqb_cache_destroy(wq);
581 wqb_fail:
582 	kmem_free(wq, sizeof (struct oce_wq));
583 	return (NULL);
584 } /* oce_wq_create */
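/*
 * Usage sketch (illustrative only): creating a 256-entry Tx queue.
 * NIC_WQ_TYPE_STANDARD is assumed to be the ordinary wq_type value.
 *
 *	struct oce_wq *wq;
 *
 *	wq = oce_wq_create(dev, eq, 256, NIC_WQ_TYPE_STANDARD);
 *	if (wq == NULL)
 *		return (DDI_FAILURE);
 */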
585 
586 /*
587  * function to delete a WQ
588  *
589  * dev - software handle to the device
590  * wq - WQ to delete
591  *
592  * return 0 => success, failure otherwise
593  */
594 int
595 oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
596 {
597 	ASSERT(dev != NULL);
598 	ASSERT(wq != NULL);
599 
600 	/* destroy the ring buffer */
601 	destroy_ring_buffer(dev, wq->ring);
602 	wq->ring = NULL;
603 
604 	/* destroy cq */
605 	(void) oce_cq_del(dev, wq->cq);
606 	wq->cq = NULL;
607 
608 	kmem_cache_destroy(wq->wqed_cache);
609 	oce_wqm_cache_destroy(wq);
610 	oce_wqb_cache_destroy(wq);
611 
612 	/* Free the packet descriptor list */
613 	OCE_LIST_DESTROY(&wq->wqe_desc_list);
614 
615 	/* Destroy the Mutex */
616 	mutex_destroy(&wq->lock);
617 	kmem_free(wq, sizeof (struct oce_wq));
618 	atomic_dec_32(&dev->nwqs);
619 	return (DDI_SUCCESS);
620 } /* oce_wq_del */
621 
622 /*
623  * function to create a RQ
624  *
625  * dev - software handle to the device
626  * rqcfg - configuration structure providing RQ config parameters
627  *
628  * return pointer to the RQ created. NULL on failure
629  */
630 struct oce_rq *
631 oce_rq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
632     uint32_t frag_size, uint32_t mtu, int16_t if_id,
633     boolean_t rss)
634 {
635 	struct oce_mbx mbx;
636 	struct mbx_create_nic_rq *fwcmd;
637 	struct oce_rq *rq;
638 	int ret;
639 	struct oce_cq *cq;
640 
641 	bzero(&mbx, sizeof (struct oce_mbx));
642 
643 	/* validate q creation parameters */
644 	if (!OCE_LOG2(frag_size))
645 		return (NULL);
646 	if ((q_len == 0) || (q_len > 1024))
647 		return (NULL);
648 
649 	/* allocate the rq */
650 	rq = kmem_zalloc(sizeof (struct oce_rq), KM_NOSLEEP);
651 	if (rq == NULL) {
652 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
653 		    "RQ allocation failed");
654 		return (NULL);
655 	}
656 
657 	rq->cfg.q_len = q_len;
658 	rq->cfg.frag_size = frag_size;
659 	rq->cfg.if_id = if_id;
660 	rq->cfg.mtu = mtu;
661 	rq->cfg.eqd = 0;
662 	rq->cfg.nbufs = 8 * 1024;
663 
664 	/* assign parent */
665 	rq->parent = (void *)dev;
666 
667 	/* create the cache */
668 	ret  =  oce_rqb_cache_create(rq, OCE_RQ_BUF_SIZE +
669 	    OCE_RQE_BUF_HEADROOM);
670 	if (ret != DDI_SUCCESS) {
671 		goto rqb_fail;
672 	}
673 	cq = oce_cq_create(dev, eq, CQ_LEN_1024, sizeof (struct oce_nic_rx_cqe),
674 	    B_FALSE, B_TRUE, B_FALSE, 3);
675 	if (cq == NULL) {
676 		goto rccq_fail;
677 	}
678 
679 	/* create the ring buffer */
680 	rq->ring = create_ring_buffer(dev, q_len,
681 	    sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT);
682 	if (rq->ring == NULL) {
683 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
684 		    "RQ ring create failed ");
685 		goto rq_ringfail;
686 	}
687 
	/* allocate the shadow ring */
689 	rq->shadow_ring = kmem_zalloc(sizeof (struct rq_shadow_entry) *
690 	    q_len, KM_SLEEP);
691 
692 	/* now fill the command */
693 	fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
694 
695 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
696 	    MBX_SUBSYSTEM_NIC,
697 	    OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
698 	    sizeof (struct mbx_create_nic_rq));
699 
700 	fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
701 
702 	fwcmd->params.req.frag_size = OCE_LOG2(frag_size);
703 	fwcmd->params.req.cq_id = cq->cq_id;
704 
705 	oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
706 	    rq->ring->dbuf->num_pages);
707 
708 	fwcmd->params.req.if_id = if_id;
709 
710 	fwcmd->params.req.max_frame_size = (uint16_t)mtu;
711 	fwcmd->params.req.is_rss_queue = rss;
712 
713 	/* fill rest of mbx */
714 	mbx.u0.s.embedded = 1;
715 	mbx.payload_length = sizeof (struct mbx_create_nic_rq);
716 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
717 
718 	/* now post the command */
719 	ret = oce_mbox_post(dev, &mbx, NULL);
720 	if (ret != 0) {
721 		oce_log(dev, CE_WARN, MOD_CONFIG,
722 		    "RQ create failed: %d", ret);
723 		goto rq_fail;
724 	}
725 
726 	/* interpret the response */
727 	rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
728 	rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
729 	rq->cq = cq;
730 
731 	/* Initialize the RQ lock */
732 	mutex_init(&rq->lock, NULL, MUTEX_DRIVER,
733 	    DDI_INTR_PRI(dev->intr_pri));
734 	atomic_inc_32(&dev->nrqs);
735 
736 	/* set the Completion Handler */
737 	cq->cq_handler = oce_drain_rq_cq;
738 	cq->cb_arg  = (void *)rq;
739 	return (rq);
740 
741 rq_fail:
742 	kmem_free(rq->shadow_ring,
743 	    sizeof (struct rq_shadow_entry) * q_len);
744 	destroy_ring_buffer(dev, rq->ring);
745 rq_ringfail:
746 	(void) oce_cq_del(dev, cq);
747 rccq_fail:
748 	oce_rqb_cache_destroy(rq);
749 rqb_fail:
750 	kmem_free(rq, sizeof (struct oce_rq));
751 	return (NULL);
752 } /* oce_rq_create */
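/*
 * Usage sketch (illustrative only): creating a 1024-entry, non-RSS RQ.
 * OCE_RQ_BUF_SIZE is reused from the cache setup above; dev->mtu and
 * dev->if_id are assumptions about the caller's configuration.
 *
 *	struct oce_rq *rq;
 *
 *	rq = oce_rq_create(dev, eq, 1024, OCE_RQ_BUF_SIZE, dev->mtu,
 *	    dev->if_id, B_FALSE);
 *	if (rq == NULL)
 *		return (DDI_FAILURE);
 */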
753 
754 /*
755  * function to delete an RQ
756  *
757  * dev - software handle to the device
758  * rq - RQ to delete
759  *
760  * return 0 => success, failure otherwise
761  */
762 int
763 oce_rq_del(struct oce_dev *dev, struct oce_rq *rq)
764 {
765 
766 	ASSERT(dev != NULL);
767 	ASSERT(rq != NULL);
768 
769 	(void) oce_cq_del(dev, rq->cq);
770 	rq->cq = NULL;
771 
772 	/* Free any outstanding buffers with hardware */
773 	oce_rq_discharge(rq);
774 
775 	/* Destroy buffer cache */
776 	oce_rqb_cache_destroy(rq);
777 	destroy_ring_buffer(dev, rq->ring);
778 	rq->ring = NULL;
779 
780 	kmem_free(rq->shadow_ring,
781 	    sizeof (struct rq_shadow_entry) * rq->cfg.q_len);
782 	mutex_destroy(&rq->lock);
783 	kmem_free(rq, sizeof (struct oce_rq));
784 	atomic_dec_32(&dev->nrqs);
785 	return (DDI_SUCCESS);
786 } /* oce_rq_del */
787 
788 /*
789  * function to arm an EQ so that it can generate events
790  *
791  * dev - software handle to the device
792  * qid - id of the EQ returned by the fw at the time of creation
793  * npopped - number of EQEs to arm with
794  * rearm - rearm bit
 * clearint - when set, clears the interrupt condition that caused the
 *	EQEs to be generated
797  *
798  * return none
799  */
800 void
801 oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
802     boolean_t rearm, boolean_t clearint)
803 {
804 	eq_db_t eq_db = {0};
805 
806 	eq_db.bits.rearm = rearm;
807 	eq_db.bits.event  = B_TRUE;
808 	eq_db.bits.num_popped = npopped;
809 	eq_db.bits.clrint = clearint;
810 	eq_db.bits.qid = qid;
811 	OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
812 }
813 
814 /*
815  * function to arm a CQ with CQEs
816  *
817  * dev - software handle to the device
818  * qid - the id of the CQ returned by the fw at the time of creation
819  * npopped - number of CQEs to arm with
820  * rearm - rearm bit enable/disable
821  *
822  * return none
823  */
824 void
825 oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
826     boolean_t rearm)
827 {
828 	cq_db_t cq_db = {0};
829 	cq_db.bits.rearm = rearm;
830 	cq_db.bits.num_popped = npopped;
831 	cq_db.bits.event = 0;
832 	cq_db.bits.qid = qid;
833 	OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
834 }
835 
836 
837 /*
838  * function to delete a EQ, CQ, MQ, WQ or RQ
839  *
 * dev - software handle to the device
841  * mbx - mbox command to send to the fw to delete the queue
842  *	mbx contains the queue information to delete
843  * req_size - the size of the mbx payload dependent on the qtype
844  * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
845  *
846  * return DDI_SUCCESS => success, failure otherwise
847  */
848 int
849 oce_destroy_q(struct oce_dev *dev, struct oce_mbx  *mbx, size_t req_size,
850     enum qtype qtype)
851 {
852 	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
853 	int opcode;
854 	int subsys;
855 	int ret;
856 
857 	switch (qtype) {
858 	case QTYPE_EQ: {
859 		opcode = OPCODE_DESTROY_COMMON_EQ;
860 		subsys = MBX_SUBSYSTEM_COMMON;
861 		break;
862 	}
863 	case QTYPE_CQ: {
864 		opcode = OPCODE_DESTROY_COMMON_CQ;
865 		subsys = MBX_SUBSYSTEM_COMMON;
866 		break;
867 	}
868 	case QTYPE_MQ: {
869 		opcode = OPCODE_DESTROY_COMMON_MQ;
870 		subsys = MBX_SUBSYSTEM_COMMON;
871 		break;
872 	}
873 	case QTYPE_WQ: {
874 		opcode = OPCODE_DELETE_NIC_WQ;
875 		subsys = MBX_SUBSYSTEM_NIC;
876 		break;
877 	}
878 	case QTYPE_RQ: {
879 		opcode = OPCODE_DELETE_NIC_RQ;
880 		subsys = MBX_SUBSYSTEM_NIC;
881 		break;
882 	}
	default: {
		ASSERT(0);
		return (DDI_FAILURE);
	}
887 	}
888 
889 	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
890 	    opcode, MBX_TIMEOUT_SEC, req_size);
891 
892 	/* fill rest of mbx */
893 	mbx->u0.s.embedded = 1;
894 	mbx->payload_length = (uint32_t)req_size;
895 	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
896 
897 	/* send command */
898 	ret = oce_mbox_post(dev, mbx, NULL);
899 
900 	if (ret != 0) {
901 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
902 		    "Failed to del q ");
903 	}
904 	return (ret);
905 }
906 
907 /*
908  * function to set the delay parameter in the EQ for interrupt coalescing
909  *
910  * dev - software handle to the device
 * eq_arr - array of ids of the EQs whose delay is to be modified
912  * eq_cnt - number of elements in eq_arr
913  * eq_delay - delay parameter
914  *
915  * return DDI_SUCCESS => success, failure otherwise
916  */
917 int
918 oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
919     uint32_t eq_cnt, uint32_t eq_delay)
920 {
921 	struct oce_mbx mbx;
922 	struct mbx_modify_common_eq_delay *fwcmd;
923 	int ret;
	uint32_t neq;
925 
926 	bzero(&mbx, sizeof (struct oce_mbx));
927 	fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;
928 
929 	/* fill the command */
930 	fwcmd->params.req.num_eq = eq_cnt;
	for (neq = 0; neq < eq_cnt; neq++) {
		fwcmd->params.req.delay[neq].eq_id = eq_arr[neq];
		fwcmd->params.req.delay[neq].phase = 0;
		fwcmd->params.req.delay[neq].dm = eq_delay;
	}
937 
	/* initialize the mbx command header */
939 	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
940 	    MBX_SUBSYSTEM_COMMON,
941 	    OPCODE_MODIFY_COMMON_EQ_DELAY,
942 	    MBX_TIMEOUT_SEC,
943 	    sizeof (struct mbx_modify_common_eq_delay));
944 
945 	/* fill rest of mbx */
946 	mbx.u0.s.embedded = 1;
947 	mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
948 	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
949 
950 	/* post the command */
951 	ret = oce_mbox_post(dev, &mbx, NULL);
952 	if (ret != 0) {
953 		oce_log(dev, CE_WARN, MOD_CONFIG,
954 		    "Failed to set EQ delay %d", ret);
955 	}
956 
957 	return (ret);
958 } /* oce_set_eq_delay */
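/*
 * Usage sketch (illustrative only): updating the delay multiplier on
 * two EQs at once; eq0 and eq1 are hypothetical EQ pointers and 32 is
 * a placeholder multiplier.
 *
 *	uint32_t eq_ids[2];
 *
 *	eq_ids[0] = eq0->eq_id;
 *	eq_ids[1] = eq1->eq_id;
 *	(void) oce_set_eq_delay(dev, eq_ids, 2, 32);
 */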
959 
960 /*
961  * function to cleanup the eqs used during stop
962  *
963  * eq - pointer to event queue structure
964  *
965  * return none
966  */
967 void
968 oce_drain_eq(struct oce_eq *eq)
969 {
970 	struct oce_eqe *eqe;
971 	uint16_t num_eqe = 0;
972 
973 	/* get the first item in eq to process */
974 	eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
975 
976 	while (eqe->u0.dw0) {
977 		eqe->u0.dw0 = LE_32(eqe->u0.dw0);
978 
979 		/* clear valid bit */
980 		eqe->u0.dw0 = 0;
981 
982 		/* process next eqe */
983 		RING_GET(eq->ring, 1);
984 
985 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
986 		num_eqe++;
	} /* while valid EQEs */
988 } /* oce_drain_eq */
989