1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Source file containing the implementation of the Hardware specific
29  * functions
30  */
31 
32 #include <oce_impl.h>
33 #include <oce_stat.h>
34 #include <oce_ioctl.h>
35 
/*
 * Default register-access attributes used for all BAR mappings:
 * little-endian structural access with strict ordering and DDI
 * flagged-error access.  The devacc_attr_access field may later be
 * downgraded to DDI_DEFAULT_ACC by oce_set_reg_fma_flags() based on
 * the FM capabilities configured by the user.
 */
static ddi_device_acc_attr_t reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
42 
43 extern int oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx,
44     size_t req_size, enum qtype qtype);
45 
46 /*
47  * function to map the device memory
48  *
49  * dev - handle to device private data structure
50  *
51  */
52 int
53 oce_pci_init(struct oce_dev *dev)
54 {
55 	int ret = 0;
56 	off_t bar_size = 0;
57 
58 	ASSERT(NULL != dev);
59 	ASSERT(NULL != dev->dip);
60 
61 	/* get number of supported bars */
62 	ret = ddi_dev_nregs(dev->dip, &dev->num_bars);
63 	if (ret != DDI_SUCCESS) {
64 		oce_log(dev, CE_WARN, MOD_CONFIG,
65 		    "%d: could not retrieve num_bars", MOD_CONFIG);
66 		return (DDI_FAILURE);
67 	}
68 
69 	/* verify each bar and map it accordingly */
70 	/* PCI CFG */
71 	ret = ddi_dev_regsize(dev->dip, OCE_DEV_CFG_BAR, &bar_size);
72 	if (ret != DDI_SUCCESS) {
73 		oce_log(dev, CE_WARN, MOD_CONFIG,
74 		    "Could not get sizeof BAR %d",
75 		    OCE_DEV_CFG_BAR);
76 		return (DDI_FAILURE);
77 	}
78 
79 	ret = ddi_regs_map_setup(dev->dip, OCE_DEV_CFG_BAR, &dev->dev_cfg_addr,
80 	    0, bar_size, &reg_accattr, &dev->dev_cfg_handle);
81 
82 	if (ret != DDI_SUCCESS) {
83 		oce_log(dev, CE_WARN, MOD_CONFIG,
84 		    "Could not map bar %d",
85 		    OCE_DEV_CFG_BAR);
86 		return (DDI_FAILURE);
87 	}
88 
89 	/* CSR */
90 	ret = ddi_dev_regsize(dev->dip, OCE_PCI_CSR_BAR, &bar_size);
91 
92 	if (ret != DDI_SUCCESS) {
93 		oce_log(dev, CE_WARN, MOD_CONFIG,
94 		    "Could not get sizeof BAR %d",
95 		    OCE_PCI_CSR_BAR);
96 		return (DDI_FAILURE);
97 	}
98 
99 	ret = ddi_regs_map_setup(dev->dip, OCE_PCI_CSR_BAR, &dev->csr_addr,
100 	    0, bar_size, &reg_accattr, &dev->csr_handle);
101 	if (ret != DDI_SUCCESS) {
102 		oce_log(dev, CE_WARN, MOD_CONFIG,
103 		    "Could not map bar %d",
104 		    OCE_PCI_CSR_BAR);
105 		ddi_regs_map_free(&dev->dev_cfg_handle);
106 		return (DDI_FAILURE);
107 	}
108 
109 	/* Doorbells */
110 	ret = ddi_dev_regsize(dev->dip, OCE_PCI_DB_BAR, &bar_size);
111 	if (ret != DDI_SUCCESS) {
112 		oce_log(dev, CE_WARN, MOD_CONFIG,
113 		    "%d Could not get sizeof BAR %d",
114 		    ret, OCE_PCI_DB_BAR);
115 		ddi_regs_map_free(&dev->csr_handle);
116 		ddi_regs_map_free(&dev->dev_cfg_handle);
117 		return (DDI_FAILURE);
118 	}
119 
120 	ret = ddi_regs_map_setup(dev->dip, OCE_PCI_DB_BAR, &dev->db_addr,
121 	    0, 0, &reg_accattr, &dev->db_handle);
122 	if (ret != DDI_SUCCESS) {
123 		oce_log(dev, CE_WARN, MOD_CONFIG,
124 		    "Could not map bar %d", OCE_PCI_DB_BAR);
125 		ddi_regs_map_free(&dev->csr_handle);
126 		ddi_regs_map_free(&dev->dev_cfg_handle);
127 		return (DDI_FAILURE);
128 	}
129 
130 	dev->fn =  OCE_PCI_FUNC(dev);
131 	ret = oce_fm_check_acc_handle(dev, dev->dev_cfg_handle);
132 
133 	if (ret != DDI_FM_OK) {
134 		oce_pci_fini(dev);
135 		return (DDI_FAILURE);
136 	}
137 
138 	return (DDI_SUCCESS);
139 } /* oce_pci_init */
140 
141 /*
142  * function to free device memory mapping mapped using
143  * oce_pci_init
144  *
145  * dev - handle to device private data
146  */
147 void
148 oce_pci_fini(struct oce_dev *dev)
149 {
150 	ASSERT(NULL != dev);
151 	ASSERT(NULL != dev->dip);
152 
153 	ddi_regs_map_free(&dev->db_handle);
154 	ddi_regs_map_free(&dev->csr_handle);
155 	ddi_regs_map_free(&dev->dev_cfg_handle);
156 } /* oce_pci_fini */
157 
158 /*
159  * function to initailise the hardware. This includes creation of queues,
160  * interfaces and associated buffers for data movement
161  *
162  * dev - software handle to the device
163  *
164  */
165 int
166 oce_hw_init(struct oce_dev *dev)
167 {
168 	int ret = DDI_SUCCESS;
169 
170 	/* create an interface for the device with out mac */
171 	ret = oce_if_create(dev, OCE_DEFAULT_IF_CAP, OCE_DEFAULT_IF_CAP_EN,
172 	    0, &dev->mac_addr[0], (uint32_t *)&dev->if_id);
173 	if (ret != 0) {
174 		oce_log(dev, CE_WARN, MOD_CONFIG,
175 		    "Interface creation failed: 0x%x", ret);
176 		dev->if_id = OCE_INVAL_IF_ID;
177 		goto init_fail;
178 	}
179 
180 	dev->if_cap_flags = OCE_DEFAULT_IF_CAP_EN;
181 
182 	/* Enable VLAN Promisc on HW */
183 	ret = oce_config_vlan(dev, (uint8_t)dev->if_id, NULL, 0,
184 	    B_TRUE, B_TRUE);
185 	if (ret != 0) {
186 		oce_log(dev, CE_WARN, MOD_CONFIG,
187 		    "Config vlan failed: %d", ret);
188 		goto init_fail;
189 
190 	}
191 
192 	/* set default flow control */
193 	ret = oce_set_flow_control(dev, dev->flow_control);
194 	if (ret != 0) {
195 		oce_log(dev, CE_NOTE, MOD_CONFIG,
196 		    "Set flow control failed: %d", ret);
197 	}
198 
199 	/* set to promiscuous mode */
200 	ret = oce_set_promiscuous(dev, dev->promisc);
201 
202 	if (ret != 0) {
203 		oce_log(dev, CE_NOTE, MOD_CONFIG,
204 		    "Set Promisc failed: %d", ret);
205 	}
206 	/* this could happen if the  driver is resuming after suspend */
207 	if (dev->num_mca > 0) {
208 		ret = oce_set_multicast_table(dev, dev->multi_cast,
209 		    dev->num_mca, B_FALSE);
210 		if (ret != 0) {
211 			oce_log(dev, CE_NOTE, MOD_CONFIG,
212 			    "Set Multicast failed: %d", ret);
213 		}
214 	}
215 
216 	/* we are done. Now return */
217 	return (DDI_SUCCESS);
218 
219 init_fail:
220 	oce_hw_fini(dev);
221 	return (DDI_FAILURE);
222 } /* oce_hw_init */
223 
224 /*
225  * function to return resources allocated in oce_hw_init
226  *
227  * dev - software handle to the device
228  *
229  */
230 void
231 oce_hw_fini(struct oce_dev *dev)
232 {
233 	int i;
234 
235 	/* release OS resources */
236 	if (dev->mq != NULL) {
237 		(void) oce_mq_del(dev, dev->mq);
238 		dev->mq = NULL;
239 	}
240 
241 	if (dev->wq[0] != NULL) {
242 		(void) oce_wq_del(dev, dev->wq[0]);
243 		dev->wq[0] = NULL;
244 	}
245 	for (i = 0; i < dev->num_vectors; i++) {
246 		if (dev->eq[i] != NULL) {
247 			if (oce_eq_del(dev, dev->eq[i])) {
248 				oce_log(dev, CE_WARN, MOD_CONFIG,
249 				    "eq[%d] del failed", i);
250 			}
251 			dev->eq[i] = NULL;
252 		}
253 	}
254 	if (dev->if_id >= 0) {
255 		(void) oce_if_del(dev, dev->if_id);
256 	}
257 
258 	if (dev->rq[0] != NULL) {
259 		(void) oce_rq_del(dev, dev->rq[0]);
260 		dev->rq[0] = NULL;
261 	}
262 } /* oce_hw_fini */
263 
264 int
265 oce_chip_hw_init(struct oce_dev *dev)
266 {
267 	struct oce_wq *wq;
268 	struct oce_rq *rq;
269 	struct oce_eq *eq;
270 	struct oce_mq *mq;
271 	int i = 0;
272 
273 	/*
274 	 * create Event Queues. One event queue per available vector. In
275 	 * case of INTX, only one vector is available and will handle
276 	 * event notification for Write Queue (WQ), Receive Queue (RQ) and
277 	 * Mbox Queue (MQ).
278 	 *
279 	 * The EQ is not directly used by the WQ, RQ and MQ. The WQ, RQ and
280 	 * MQ is composed of a Completion Queue (CQ) that is created per
281 	 * queue and is dependent on the queue type. The EQ passed is
282 	 * associated with the CQ at the time of creation.
283 	 *
284 	 * In the case of MSIX, there will be one EQ for the RQ and one EQ
285 	 * shared between the WQ and MQ.
286 	 */
287 	for (i = 0; i < dev->num_vectors; i++) {
288 		eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
289 		if (eq == NULL) {
290 			oce_log(dev, CE_WARN, MOD_CONFIG,
291 			    "EQ creation(%d) failed ", i);
292 			goto chip_fail;
293 		}
294 		/* Save the eq pointer */
295 		dev->eq[eq->eq_id % OCE_MAX_EQ] = eq;
296 	}
297 
298 	/*
299 	 * create the Write Queue (WQ). The WQ is the low level sructure for
300 	 * queueing send packets. It maintains a ring buffer to queue packets
301 	 * to be sent out on the wire and return the context to the host
302 	 * when there is a send complete event.
303 	 *
304 	 * The WQ uses a Completion Queue (CQ) with an associated EQ for
305 	 * handling send completion events.
306 	 */
307 	wq = oce_wq_create(dev, dev->eq[0],
308 	    dev->tx_ring_size, NIC_WQ_TYPE_STANDARD);
309 	if (wq ==  NULL) {
310 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
311 		    "WQ creation failed ");
312 		goto chip_fail;
313 	}
314 	/* store the WQ pointer */
315 	dev->wq[0] = wq;
316 
317 	/*
318 	 * create the Receive Queue (RQ). The RQ is the low level structure
319 	 * for receiving data from the wire, It implements a ring buffer
320 	 * that allows the adpater to DMA data onto host buffers.
321 	 *
322 	 * The RQ uses a Completion Queue (CQ) with an associated EQ for
323 	 * handling recieve events when packets are received by the adapter
324 	 */
325 	rq = oce_rq_create(dev,
326 	    ((dev->num_vectors > 1) ? dev->eq[1] : dev->eq[0]),
327 	    dev->rx_ring_size,
328 	    OCE_RQ_BUF_SIZE, OCE_RQ_MAX_FRAME_SZ,
329 	    dev->if_id, B_FALSE);
330 	if (rq == NULL) {
331 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
332 		    "RQ creation failed ");
333 		goto chip_fail;
334 	}
335 	dev->rq[0] = rq;
336 
337 	/*
338 	 * create the Mailbox Queue (MQ). Only one per adapter instance can
339 	 * be created. The MQ is used for receiving asynchronous adapter
340 	 * events, like link status updates.
341 	 *
342 	 * The MQ uses an Asynchronous CQ (ACQ) with an associated EQ for
343 	 * handling asynchronous event notification to the host.
344 	 */
345 	mq = oce_mq_create(dev, dev->eq[0], 64);
346 	if (mq == NULL) {
347 		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
348 		    "MQ creation failed ");
349 		goto chip_fail;
350 	}
351 	dev->mq = mq;
352 
353 	return (DDI_SUCCESS);
354 chip_fail:
355 	oce_chip_hw_fini(dev);
356 	return (DDI_FAILURE);
357 } /* oce_chip_hw_init */
358 
359 
360 void
361 oce_chip_hw_fini(struct oce_dev *dev)
362 {
363 	struct oce_mbx mbx;
364 	struct mbx_destroy_common_mq *mq_cmd;
365 	struct mbx_delete_nic_rq *rq_cmd;
366 	struct mbx_delete_nic_wq *wq_cmd;
367 	struct mbx_destroy_common_cq *cq_cmd;
368 	struct oce_mq *mq = dev->mq;
369 	struct oce_rq *rq = dev->rq[0];
370 	struct oce_wq *wq = dev->wq[0];
371 	struct oce_eq *eq = NULL;
372 	struct mbx_destroy_common_eq *eq_cmd;
373 	int i;
374 
375 	if (mq != NULL) {
376 
377 		/* send a command to delete the MQ */
378 		bzero(&mbx, sizeof (struct oce_mbx));
379 		mq_cmd = (struct mbx_destroy_common_mq *)&mbx.payload;
380 
381 		mq_cmd->params.req.id = mq->mq_id;
382 		(void) oce_destroy_q(dev, &mbx,
383 		    sizeof (struct mbx_destroy_common_cq), QTYPE_MQ);
384 
385 		/* send a command to delete the MQ_CQ */
386 		bzero(&mbx, sizeof (struct oce_mbx));
387 		cq_cmd = (struct mbx_destroy_common_cq *)&mbx.payload;
388 
389 		cq_cmd->params.req.id = mq->cq->cq_id;
390 		(void) oce_destroy_q(dev, &mbx,
391 		    sizeof (struct mbx_destroy_common_cq), QTYPE_CQ);
392 		mq->ring->pidx = mq->ring->cidx = 0;
393 	}
394 
395 	if (rq != NULL) {
396 		/* send a command to delete the RQ */
397 		bzero(&mbx, sizeof (struct oce_mbx));
398 
399 		rq_cmd = (struct mbx_delete_nic_rq *)&mbx.payload;
400 		rq_cmd->params.req.rq_id = rq->rq_id;
401 
402 		(void) oce_destroy_q(dev, &mbx,
403 		    sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
404 
405 		rq->ring->cidx = rq->ring->pidx = 0;
406 
407 		/* send a command to delete the RQ_CQ */
408 		bzero(&mbx, sizeof (struct oce_mbx));
409 		cq_cmd = (struct mbx_destroy_common_cq *)&mbx.payload;
410 
411 		cq_cmd->params.req.id = rq->cq->cq_id;
412 		(void) oce_destroy_q(dev, &mbx,
413 		    sizeof (struct mbx_destroy_common_cq), QTYPE_CQ);
414 		rq->cq->ring->pidx = rq->cq->ring->cidx = 0;
415 	}
416 
417 	if (wq != NULL) {
418 		/* send a command to delete the WQ */
419 		bzero(&mbx, sizeof (struct oce_mbx));
420 
421 		/* now fill the command */
422 		wq_cmd = (struct mbx_delete_nic_wq *)&mbx.payload;
423 		wq_cmd->params.req.wq_id = wq->wq_id;
424 		(void) oce_destroy_q(dev, &mbx,
425 		    sizeof (struct mbx_delete_nic_wq), QTYPE_WQ);
426 
427 		wq->ring->pidx = wq->ring->cidx = 0;
428 
429 		/* send a command to delete the WQ_CQ */
430 		bzero(&mbx, sizeof (struct oce_mbx));
431 		cq_cmd = (struct mbx_destroy_common_cq *)&mbx.payload;
432 		cq_cmd->params.req.id = wq->cq->cq_id;
433 		(void) oce_destroy_q(dev, &mbx,
434 		    sizeof (struct mbx_destroy_common_cq), QTYPE_CQ);
435 		wq->cq->ring->pidx = wq->cq->ring->cidx = 0;
436 	}
437 
438 	for (i = 0; i < dev->num_vectors; i++) {
439 		eq = dev->eq[i];
440 		if (eq != NULL) {
441 			bzero(&mbx, sizeof (struct oce_mbx));
442 			/* send a command to delete the EQ */
443 			eq_cmd = (struct mbx_destroy_common_eq *)&mbx.payload;
444 
445 			eq_cmd->params.req.id = eq->eq_id;
446 
447 			(void) oce_destroy_q(dev, &mbx,
448 			    sizeof (struct mbx_destroy_common_eq), QTYPE_EQ);
449 			eq->ring->pidx = eq->ring->cidx = 0;
450 		}
451 	}
452 }
453 
454 /*
455  * function to check if a reset is required
456  *
457  * dev - software handle to the device
458  *
459  */
460 boolean_t
461 oce_is_reset_pci(struct oce_dev *dev)
462 {
463 	mpu_ep_semaphore_t post_status;
464 
465 	ASSERT(dev != NULL);
466 	ASSERT(dev->dip != NULL);
467 
468 	post_status.dw0 = 0;
469 	post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
470 
471 	if (post_status.bits.stage == POST_STAGE_ARMFW_READY) {
472 		return (B_FALSE);
473 	} else if ((post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) ||
474 	    post_status.bits.stage == POST_STAGE_ARMFW_UE) {
475 		return (B_TRUE);
476 	}
477 
478 	return (B_TRUE);
479 } /* oce_is_reset_pci */
480 
481 /*
482  * function to do a soft reset on the device
483  *
484  * dev - software handle to the device
485  *
486  */
487 int
488 oce_pci_soft_reset(struct oce_dev *dev)
489 {
490 	pcicfg_soft_reset_t soft_rst;
491 	/* struct mpu_ep_control ep_control; */
492 	/* struct pcicfg_online1 online1; */
493 	clock_t tmo;
494 	clock_t earlier = ddi_get_lbolt();
495 
496 	ASSERT(dev != NULL);
497 
498 	/* issue soft reset */
499 	soft_rst.dw0 = OCE_CFG_READ32(dev, PCICFG_SOFT_RESET);
500 	soft_rst.bits.soft_reset = 0x01;
501 	OCE_CFG_WRITE32(dev, PCICFG_SOFT_RESET, soft_rst.dw0);
502 
503 	/* wait till soft reset bit deasserts */
504 	tmo = drv_usectohz(60000000); /* 1.0min */
505 	do {
506 		if ((ddi_get_lbolt() - earlier) > tmo) {
507 			tmo = 0;
508 			break;
509 		}
510 
511 		soft_rst.dw0 = OCE_CFG_READ32(dev, PCICFG_SOFT_RESET);
512 		if (soft_rst.bits.soft_reset)
513 			drv_usecwait(100);
514 	} while (soft_rst.bits.soft_reset);
515 
516 	if (soft_rst.bits.soft_reset) {
517 		oce_log(dev, CE_WARN, MOD_CONFIG,
518 		    "0x%x soft_reset"
519 		    "bit asserted[1]. Reset failed",
520 		    soft_rst.dw0);
521 		return (DDI_FAILURE);
522 	}
523 
524 	return (oce_POST(dev));
525 } /* oce_pci_soft_reset */
526 /*
527  * function to trigger a POST on the device
528  *
529  * dev - software handle to the device
530  *
531  */
532 int
533 oce_POST(struct oce_dev *dev)
534 {
535 	mpu_ep_semaphore_t post_status;
536 	clock_t tmo;
537 	clock_t earlier = ddi_get_lbolt();
538 
539 	/* read semaphore CSR */
540 	post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
541 
542 	/* if host is ready then wait for fw ready else send POST */
543 	if (post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) {
544 		post_status.bits.stage = POST_STAGE_CHIP_RESET;
545 		OCE_CSR_WRITE32(dev, MPU_EP_SEMAPHORE, post_status.dw0);
546 	}
547 
548 	/* wait for FW ready */
549 	tmo = drv_usectohz(60000000); /* 1.0min */
550 	for (;;) {
551 		if ((ddi_get_lbolt() - earlier) > tmo) {
552 			tmo = 0;
553 			break;
554 		}
555 
556 		post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
557 		if (post_status.bits.error) break;
558 		if (post_status.bits.stage == POST_STAGE_ARMFW_READY)
559 			break;
560 
561 		drv_usecwait(100);
562 	}
563 
564 	if (post_status.bits.error) {
565 		oce_log(dev, CE_WARN, MOD_CONFIG,
566 		    "0x%x POST ERROR!!", post_status.dw0);
567 		return (DDI_FAILURE);
568 	} else if (post_status.bits.stage == POST_STAGE_ARMFW_READY) {
569 		oce_log(dev, CE_WARN, MOD_CONFIG,
570 		    "0x%x POST SUCCESSFUL",
571 		    post_status.dw0);
572 		return (DDI_SUCCESS);
573 	} else {
574 		oce_log(dev, CE_WARN, MOD_CONFIG,
575 		    "0x%x POST timedout", post_status.dw0);
576 		return (DDI_FAILURE);
577 	}
578 } /* oce_POST */
579 /*
580  * function to modify register access attributes corresponding to the
581  * FM capabilities configured by the user
582  *
583  * fm_caps - fm capability configured by the user and accepted by the driver
584  */
585 void
586 oce_set_reg_fma_flags(int fm_caps)
587 {
588 	if (fm_caps == DDI_FM_NOT_CAPABLE) {
589 		return;
590 	}
591 	if (DDI_FM_ACC_ERR_CAP(fm_caps)) {
592 		reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
593 	} else {
594 		reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
595 	}
596 } /* oce_set_fma_flags */
597