/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#define OCS_COPYRIGHT "Copyright (C) 2017 Broadcom. All rights reserved."

/**
 * @file
 * Implementation of required FreeBSD PCI interface functions
 */

#include "ocs.h"
#include "version.h"
#include <sys/sysctl.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data");

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

/**
 * Tunable parameters for transport
 */
int logmask = 0;
int ctrlmask = 2;
int logdest = 1;
int loglevel = LOG_INFO;
int ramlog_size = 1*1024*1024;
int ddump_saved_size = 0;
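
/*
 * Default queue topology specification; assigned to
 * hw_global.queue_topology_string in ocs_setup_params().
 */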
static const char *queue_topology = "eq cq rq cq mq $nulp($nwq(cq wq:ulp=$rpt1)) cq wq:len=256:class=1";

static void ocs_release_bus(struct ocs_softc *);
static int32_t ocs_intr_alloc(struct ocs_softc *);
static int32_t ocs_intr_setup(struct ocs_softc *);
static int32_t ocs_intr_teardown(struct ocs_softc *);
static int ocs_pci_intx_filter(void *);
static void ocs_pci_intr(void *);
static int32_t ocs_init_dma_tag(struct ocs_softc *ocs);

static int32_t ocs_setup_fcports(ocs_t *ocs);

ocs_t *ocs_devices[MAX_OCS_DEVICES];

/**
 * @brief Check support for the given device
 *
 * Determine support for a given device by examining the PCI vendor and
 * device IDs.
 *
 * @param dev device abstraction
 *
 * @return BUS_PROBE_DEFAULT if the device is supported, ENXIO otherwise
 */
static int
ocs_pci_probe(device_t dev)
{
	char	*desc = NULL;

	if (pci_get_vendor(dev) != PCI_VENDOR_EMULEX) {
		return ENXIO;
	}

	switch (pci_get_device(dev)) {
	case PCI_PRODUCT_EMULEX_OCE16001:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_LPE31004:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_OCE50102:
		desc = "Emulex LightPulse 10GbE FCoE/NIC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_LANCER_G7:
		desc = "Emulex LightPulse G7 FC Adapter";
		break;
	default:
		return ENXIO;
	}

	device_set_desc(dev, desc);

	return BUS_PROBE_DEFAULT;
}

static int
ocs_map_g7_bars(device_t dev, struct ocs_softc *ocs)
{
	int i, r;
	uint32_t  val = 0;

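	/*
	 * Walk every BAR and map each implemented memory BAR (the Lancer G7
	 * exposes its registers across several BARs), skipping the upper half
	 * of 64-bit BARs.
	 */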
	for (i = 0, r = 0; i < PCI_MAX_BAR; i++) {
		val = pci_read_config(dev, PCIR_BAR(i), 4);
		if (!PCI_BAR_MEM(val)) {
			continue;
		}
		if (!(val & PCIM_BAR_MEM_BASE)) {
			/* no address */
			continue;
		}
		ocs->reg[r].rid = PCIR_BAR(i);
		ocs->reg[r].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&ocs->reg[r].rid, RF_ACTIVE);
		if (ocs->reg[r].res) {
			ocs->reg[r].btag = rman_get_bustag(ocs->reg[r].res);
			ocs->reg[r].bhandle = rman_get_bushandle(ocs->reg[r].res);
			r++;
		} else {
			device_printf(dev, "bus_alloc_resource failed rid=%#x\n",
					ocs->reg[r].rid);
			ocs_release_bus(ocs);
			return ENXIO;
		}

		/*
		 * If the 64-bit attribute is set, both this BAR and the
		 * next form the complete address. Skip processing the
		 * next BAR.
		 */
		if (val & PCIM_BAR_MEM_64) {
			i++;
		}
	}

	return 0;
}

static int
ocs_map_bars(device_t dev, struct ocs_softc *ocs)
{
	/*
	 * Map PCI BAR0 register into the CPU's space.
	 */

	ocs->reg[0].rid = PCIR_BAR(PCI_64BIT_BAR0);
	ocs->reg[0].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ocs->reg[0].rid, RF_ACTIVE);

	if (ocs->reg[0].res == NULL) {
		device_printf(dev, "bus_alloc_resource failed rid=%#x\n",
				ocs->reg[0].rid);
		return ENXIO;
	}

	ocs->reg[0].btag = rman_get_bustag(ocs->reg[0].res);
	ocs->reg[0].bhandle = rman_get_bushandle(ocs->reg[0].res);
	return 0;
}

static int
ocs_setup_params(struct ocs_softc *ocs)
{
	int32_t	i = 0;
	const char	*hw_war_version;

	/* Set up tunable parameters */
	ocs->ctrlmask = ctrlmask;
	ocs->speed = 0;
	ocs->topology = 0;
	ocs->ethernet_license = 0;
	ocs->num_scsi_ios = 8192;
	ocs->enable_hlm = 0;
	ocs->hlm_group_size = 8;
	ocs->logmask = logmask;

	ocs->config_tgt = FALSE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"target", &i)) {
		if (1 == i) {
			ocs->config_tgt = TRUE;
			device_printf(ocs->dev, "Enabling target\n");
		}
	}

	ocs->config_ini = TRUE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"initiator", &i)) {
		if (0 == i) {
			ocs->config_ini = FALSE;
			device_printf(ocs->dev, "Disabling initiator\n");
		}
	}
	ocs->enable_ini = ocs->config_ini;

	if (!ocs->config_ini && !ocs->config_tgt) {
		device_printf(ocs->dev, "Unsupported, both initiator and target mode disabled.\n");
		return 1;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"logmask", &logmask)) {
		device_printf(ocs->dev, "logmask = %#x\n", logmask);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"logdest", &logdest)) {
		device_printf(ocs->dev, "logdest = %#x\n", logdest);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"loglevel", &loglevel)) {
		device_printf(ocs->dev, "loglevel = %#x\n", loglevel);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"ramlog_size", &ramlog_size)) {
		device_printf(ocs->dev, "ramlog_size = %#x\n", ramlog_size);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"ddump_saved_size", &ddump_saved_size)) {
		device_printf(ocs->dev, "ddump_saved_size = %#x\n", ddump_saved_size);
	}

	/* If enabled, initialize a RAM logging buffer */
	if (logdest & 2) {
		ocs->ramlog = ocs_ramlog_init(ocs, ramlog_size/OCS_RAMLOG_DEFAULT_BUFFERS,
			OCS_RAMLOG_DEFAULT_BUFFERS);
		/*
		 * If NULL was returned, simply skip using the ramlog but set
		 * logdest to 1 to ensure that we at least get default logging.
		 */
		if (ocs->ramlog == NULL) {
			logdest = 1;
		}
	}

	/* Initialize a saved ddump */
	if (ddump_saved_size) {
		if (ocs_textbuf_alloc(ocs, &ocs->ddump_saved, ddump_saved_size)) {
			ocs_log_err(ocs, "failed to allocate memory for saved ddump\n");
		}
	}

	if (0 == resource_string_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"hw_war_version", &hw_war_version)) {
		device_printf(ocs->dev, "hw_war_version = %s\n", hw_war_version);
		ocs->hw_war_version = strdup(hw_war_version, M_OCS);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "explicit_buffer_list", &i)) {
		ocs->explicit_buffer_list = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"ethernet_license", &i)) {
		ocs->ethernet_license = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"speed", &i)) {
		device_printf(ocs->dev, "speed = %d Mbps\n", i);
		ocs->speed = i;
	}
	ocs->desc = device_get_desc(ocs->dev);

	ocs_device_lock_init(ocs);
	ocs->driver_version = STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." STR_BE_BRANCH;
	ocs->model = ocs_pci_model(ocs->pci_vendor, ocs->pci_device);

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "enable_hlm", &i)) {
		device_printf(ocs->dev, "enable_hlm = %d\n", i);
		ocs->enable_hlm = i;
		if (ocs->enable_hlm) {
			ocs->hlm_group_size = 8;

			if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
						    "hlm_group_size", &i)) {
				ocs->hlm_group_size = i;
			}
			device_printf(ocs->dev, "hlm_group_size = %d\n", i);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"num_scsi_ios", &i)) {
		ocs->num_scsi_ios = i;
		device_printf(ocs->dev, "num_scsi_ios = %d\n", ocs->num_scsi_ios);
	} else {
		ocs->num_scsi_ios = 8192;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"topology", &i)) {
		ocs->topology = i;
		device_printf(ocs->dev, "Setting topology=%#x\n", i);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "num_vports", &i)) {
		if (i >= 0 && i <= 254) {
			device_printf(ocs->dev, "num_vports = %d\n", i);
			ocs->num_vports = i;
		} else {
			device_printf(ocs->dev, "num_vports: %d not supported\n", i);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "external_loopback", &i)) {
		device_printf(ocs->dev, "external_loopback = %d\n", i);
		ocs->external_loopback = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "tgt_rscn_delay", &i)) {
		device_printf(ocs->dev, "tgt_rscn_delay = %d\n", i);
		ocs->tgt_rscn_delay_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "tgt_rscn_period", &i)) {
		device_printf(ocs->dev, "tgt_rscn_period = %d\n", i);
		ocs->tgt_rscn_period_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "target_io_timer", &i)) {
		device_printf(ocs->dev, "target_io_timer = %d\n", i);
		ocs->target_io_timer_sec = i;
	}

	hw_global.queue_topology_string = queue_topology;
	ocs->rq_selection_policy = 0;
	ocs->rr_quanta = 1;
	ocs->filter_def = "0,0,0,0";

	return 0;
}

static int32_t
ocs_setup_fcports(ocs_t *ocs)
{
	uint32_t i = 0, role = 0;
	uint64_t sli_wwpn, sli_wwnn;
	size_t size;
	ocs_xport_t *xport = ocs->xport;
	ocs_vport_spec_t *vport;
	ocs_fcport *fcp = NULL;

	size = sizeof(ocs_fcport) * (ocs->num_vports + 1);

	ocs->fcports = ocs_malloc(ocs, size, M_ZERO|M_NOWAIT);
	if (ocs->fcports == NULL) {
		device_printf(ocs->dev, "Can't allocate fcport\n");
		return 1;
	}

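	/* Combine the enabled initiator/target roles into a single bitmask. */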
	role = (ocs->enable_ini ? KNOB_ROLE_INITIATOR : 0) |
		(ocs->enable_tgt ? KNOB_ROLE_TARGET : 0);

	fcp = FCPORT(ocs, i);
	fcp->role = role;
	i++;

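	/*
	 * The first fcport entry was set up above; bind the remaining entries
	 * to the vports on the transport's vport list and record their
	 * default WWNs.
	 */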
	ocs_list_foreach(&xport->vport_list, vport) {
		fcp = FCPORT(ocs, i);
		vport->tgt_data = fcp;
		fcp->vport = vport;
		fcp->role = role;

		if (ocs_hw_get_def_wwn(ocs, i, &sli_wwpn, &sli_wwnn)) {
			ocs_log_err(ocs, "Get default wwn failed\n");
			i++;
			continue;
		}

		vport->wwpn = ocs_be64toh(sli_wwpn);
		vport->wwnn = ocs_be64toh(sli_wwnn);
		i++;
		ocs_log_debug(ocs, "VPort wwpn: %lx wwnn: %lx\n", vport->wwpn, vport->wwnn);
	}

	return 0;
}

int32_t
ocs_device_attach(ocs_t *ocs)
{
	int32_t i;
	ocs_io_t *io = NULL;

	if (ocs->attached) {
		ocs_log_warn(ocs, "%s: Device is already attached\n", __func__);
		return -1;
	}

	/* Allocate transport object and bring online */
	ocs->xport = ocs_xport_alloc(ocs);
	if (ocs->xport == NULL) {
		device_printf(ocs->dev, "failed to allocate transport object\n");
		return ENOMEM;
	} else if (ocs_xport_attach(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to attach transport object\n", __func__);
		goto fail_xport_attach;
	} else if (ocs_xport_initialize(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to initialize transport object\n", __func__);
		goto fail_xport_init;
	}

	if (ocs_init_dma_tag(ocs)) {
		goto fail_intr_setup;
	}

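	/* Give each pre-allocated IO its own DMA map and mark it free for CAM. */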
	for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
		if (bus_dmamap_create(ocs->buf_dmat, 0, &io->tgt_io.dmap)) {
			device_printf(ocs->dev, "%s: bad dma map create\n", __func__);
		}

		io->tgt_io.state = OCS_CAM_IO_FREE;
	}

	if (ocs_setup_fcports(ocs)) {
		device_printf(ocs->dev, "FCports creation failed\n");
		goto fail_intr_setup;
	}

	if (ocs_cam_attach(ocs)) {
		device_printf(ocs->dev, "cam attach failed\n");
		goto fail_intr_setup;
	}

	if (ocs_intr_setup(ocs)) {
		device_printf(ocs->dev, "Interrupt setup failed\n");
		goto fail_intr_setup;
	}

	if (ocs->enable_ini || ocs->enable_tgt) {
		if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE)) {
			device_printf(ocs->dev, "Can't init port\n");
			goto fail_xport_online;
		}
	}

	ocs->attached = true;

	return 0;

fail_xport_online:
	if (ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN)) {
		device_printf(ocs->dev, "Transport Shutdown timed out\n");
	}
	ocs_intr_teardown(ocs);
fail_intr_setup:
fail_xport_init:
	ocs_xport_detach(ocs->xport);
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	ocs_xport_free(ocs->xport);
	ocs->xport = NULL;
fail_xport_attach:
	if (ocs->xport)
		ocs_free(ocs, ocs->xport, sizeof(*(ocs->xport)));
	ocs->xport = NULL;
	return ENXIO;
}

/**
 * @brief Connect the driver to the given device
 *
 * If the probe routine is successful, the OS gives the driver the
 * opportunity to connect itself to the device. This routine maps the
 * PCI resources (memory BARs and interrupts) and initializes the
 * hardware object.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver attaches to the device, ENXIO otherwise
 */

static int
ocs_pci_attach(device_t dev)
{
	struct ocs_softc	*ocs;
	int			instance;

	instance = device_get_unit(dev);

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (NULL == ocs) {
		device_printf(dev, "cannot allocate softc\n");
		return ENOMEM;
	}
	memset(ocs, 0, sizeof(struct ocs_softc));

	if (instance < ARRAY_SIZE(ocs_devices)) {
		ocs_devices[instance] = ocs;
	} else {
		device_printf(dev, "got unexpected ocs instance number %d\n", instance);
	}

	ocs->instance_index = instance;

	ocs->dev = dev;

	pci_enable_io(dev, SYS_RES_MEMORY);
	pci_enable_busmaster(dev);

	ocs->pci_vendor = pci_get_vendor(dev);
	ocs->pci_device = pci_get_device(dev);
	ocs->pci_subsystem_vendor = pci_get_subvendor(dev);
	ocs->pci_subsystem_device = pci_get_subdevice(dev);

	snprintf(ocs->businfo, sizeof(ocs->businfo), "%02X:%02X:%02X",
		pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));

	/* Map all memory BARs */
	if (ocs->pci_device == PCI_PRODUCT_EMULEX_LANCER_G7) {
		if (ocs_map_g7_bars(dev, ocs)) {
			device_printf(dev, "Failed to map pci bars\n");
			goto release_bus;
		}
	} else {
		if (ocs_map_bars(dev, ocs)) {
			device_printf(dev, "Failed to map pci bars\n");
			goto release_bus;
		}
	}

	/* create a root DMA tag for the device */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),
				1,		/* byte alignment */
				0,		/* no boundary restrictions */
				BUS_SPACE_MAXADDR, /* no minimum low address */
				BUS_SPACE_MAXADDR, /* no maximum high address */
				NULL,		/* no filter function */
				NULL,		/* or arguments */
				BUS_SPACE_MAXSIZE, /* max size covered by tag */
				BUS_SPACE_UNRESTRICTED, /* no segment count restrictions */
				BUS_SPACE_MAXSIZE, /* no segment length restrictions */
				0,		/* flags */
				NULL,		/* no lock manipulation function */
				NULL,		/* or arguments */
				&ocs->dmat)) {
		device_printf(dev, "parent DMA tag allocation failed\n");
		goto release_bus;
	}

	if (ocs_intr_alloc(ocs)) {
		device_printf(dev, "Interrupt allocation failed\n");
		goto release_bus;
	}

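	/* Only Fibre Channel functions (serial bus class, FC subclass) are supported. */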
	if (PCIC_SERIALBUS == pci_get_class(dev) &&
			PCIS_SERIALBUS_FC == pci_get_subclass(dev)) {
		ocs->ocs_xport = OCS_XPORT_FC;
	} else {
		device_printf(dev, "unsupported class (%#x : %#x)\n",
				pci_get_class(dev),
				pci_get_subclass(dev));
		goto release_bus;
	}

	/* Setup tunable parameters */
	if (ocs_setup_params(ocs)) {
		device_printf(ocs->dev, "failed to setup params\n");
		goto release_bus;
	}

	if (ocs_device_attach(ocs)) {
		device_printf(ocs->dev, "failed to attach device\n");
		goto release_params;
	}

	ocs->fc_type = FC_TYPE_FCP;

	ocs_debug_attach(ocs);

	return 0;

release_params:
	ocs_ramlog_free(ocs, ocs->ramlog);
	ocs_device_lock_free(ocs);
	free(ocs->hw_war_version, M_OCS);
release_bus:
	ocs_release_bus(ocs);
	return ENXIO;
}

/**
 * @brief Free resources when the PCI device is detached
 *
 * @param ocs pointer to ocs structure
 *
 * @return 0 for success, a negative error code value for failure.
 */

int32_t
ocs_device_detach(ocs_t *ocs)
{
	int32_t rc = 0, i;
	ocs_io_t *io = NULL;

	if (ocs != NULL) {
		if (!ocs->attached) {
			ocs_log_warn(ocs, "%s: Device is not attached\n", __func__);
			return -1;
		}

		ocs->attached = FALSE;

		rc = ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN);
		if (rc) {
			ocs_log_err(ocs, "%s: Transport Shutdown timed out\n", __func__);
		}

		ocs_intr_teardown(ocs);

		if (ocs_xport_detach(ocs->xport) != 0) {
			ocs_log_err(ocs, "%s: Transport detach failed\n", __func__);
		}

		ocs_cam_detach(ocs);
		ocs_free(ocs, ocs->fcports, sizeof(ocs_fcport) * (ocs->num_vports + 1));

		for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
			if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) {
				device_printf(ocs->dev, "%s: bad dma map destroy\n", __func__);
			}
		}
		bus_dma_tag_destroy(ocs->dmat);
		ocs_xport_free(ocs->xport);
		ocs->xport = NULL;
	}

	return 0;
}

/**
 * @brief Detach the driver from the given device
 *
 * If the driver is a loadable module, this routine is called at unload
 * time. It stops the device and frees any allocated resources.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver detaches from the device, an error code otherwise
 */
static int
ocs_pci_detach(device_t dev)
{
	struct ocs_softc	*ocs;

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (!ocs) {
		device_printf(dev, "no driver context?!?\n");
		return -1;
	}

	if (ocs->config_tgt && ocs->enable_tgt) {
		device_printf(dev, "can't detach with target mode enabled\n");
		return EBUSY;
	}

	ocs_device_detach(ocs);

	/*
	 * Workaround for an OCS SCSI transport quirk.
	 *
	 * CTL requires that target mode is disabled prior to unloading the
	 * driver (i.e. ocs->enable_tgt = FALSE), but once the target is
	 * disabled, the transport will not call ocs_scsi_tgt_del_device(),
	 * which deallocates the CAM resources. The workaround is to make the
	 * call explicitly here.
	 */
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	/* Free the buffer created by strdup() */
	free(ocs->hw_war_version, M_OCS);

	ocs_device_lock_free(ocs);

	ocs_debug_detach(ocs);

	ocs_ramlog_free(ocs, ocs->ramlog);

	ocs_release_bus(ocs);

	return 0;
}

/**
 * @brief Notify the driver of system shutdown
 *
 * @param dev device abstraction
 *
 * @return 0 on success
 */
static int
ocs_pci_shutdown(device_t dev)
{
	device_printf(dev, "%s\n", __func__);
	return 0;
}

/**
 * @brief Release bus resources allocated within the soft context
 *
 * @param ocs Pointer to the driver's context
 *
 * @return none
 */
static void
ocs_release_bus(struct ocs_softc *ocs)
{

	if (NULL != ocs) {
		uint32_t	i;

		ocs_intr_teardown(ocs);

		if (ocs->irq) {
			bus_release_resource(ocs->dev, SYS_RES_IRQ,
					rman_get_rid(ocs->irq), ocs->irq);

			if (ocs->n_vec) {
				pci_release_msi(ocs->dev);
				ocs->n_vec = 0;
			}

			ocs->irq = NULL;
		}

		bus_dma_tag_destroy(ocs->dmat);

		for (i = 0; i < PCI_MAX_BAR; i++) {
			if (ocs->reg[i].res) {
				bus_release_resource(ocs->dev, SYS_RES_MEMORY,
						ocs->reg[i].rid,
						ocs->reg[i].res);
			}
		}
	}
}

/**
 * @brief Allocate and initialize interrupts
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_alloc(struct ocs_softc *ocs)
{

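	/*
	 * Prefer a single MSI-X vector, fall back to MSI, and finally to INTx;
	 * n_vec == 0 selects the legacy interrupt filter in ocs_intr_setup().
	 */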
	ocs->n_vec = 1;
	if (pci_alloc_msix(ocs->dev, &ocs->n_vec)) {
		device_printf(ocs->dev, "MSI-X allocation failed\n");
		if (pci_alloc_msi(ocs->dev, &ocs->n_vec)) {
			device_printf(ocs->dev, "MSI allocation failed\n");
			ocs->irqid = 0;
			ocs->n_vec = 0;
		} else
			ocs->irqid = 1;
	} else {
		ocs->irqid = 1;
	}

	ocs->irq = bus_alloc_resource_any(ocs->dev, SYS_RES_IRQ, &ocs->irqid,
			RF_ACTIVE | RF_SHAREABLE);
	if (NULL == ocs->irq) {
		device_printf(ocs->dev, "could not allocate interrupt\n");
		return -1;
	}

	ocs->intr_ctx.vec = 0;
	ocs->intr_ctx.softc = ocs;
	snprintf(ocs->intr_ctx.name, sizeof(ocs->intr_ctx.name),
			"%s_intr_%d",
			device_get_nameunit(ocs->dev),
			ocs->intr_ctx.vec);

	return 0;
}

/**
 * @brief Create and attach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_setup(struct ocs_softc *ocs)
{
	driver_filter_t	*filter = NULL;

	if (0 == ocs->n_vec) {
		filter = ocs_pci_intx_filter;
	}

	if (bus_setup_intr(ocs->dev, ocs->irq, INTR_MPSAFE | INTR_TYPE_CAM,
				filter, ocs_pci_intr, &ocs->intr_ctx,
				&ocs->tag)) {
		device_printf(ocs->dev, "could not initialize interrupt\n");
		return -1;
	}

	return 0;
}

/**
 * @brief Detach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_teardown(struct ocs_softc *ocs)
{

	if (!ocs) {
		printf("%s: bad driver context?!?\n", __func__);
		return -1;
	}

	if (ocs->tag) {
		bus_teardown_intr(ocs->dev, ocs->irq, ocs->tag);
		ocs->tag = NULL;
	}

	return 0;
}

/**
 * @brief PCI interrupt handler
 *
 * @param arg pointer to the driver's software context
 *
 * @return FILTER_SCHEDULE_THREAD if the interrupt was raised by this device,
 *         FILTER_STRAY otherwise
 */
static int
ocs_pci_intx_filter(void *arg)
{
	ocs_intr_ctx_t	*intr = arg;
	struct ocs_softc *ocs = NULL;
	uint16_t	val = 0;

	if (NULL == intr) {
		return FILTER_STRAY;
	}

	ocs = intr->softc;
#ifndef PCIM_STATUS_INTR
#define PCIM_STATUS_INTR	0x0008
#endif
	val = pci_read_config(ocs->dev, PCIR_STATUS, 2);
	if (0xffff == val) {
		device_printf(ocs->dev, "%s: pci_read_config(PCIR_STATUS) failed\n", __func__);
		return FILTER_STRAY;
	}
	if (0 == (val & PCIM_STATUS_INTR)) {
		return FILTER_STRAY;
	}

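	/* The interrupt is ours: mask INTx and defer to the threaded handler. */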
	val = pci_read_config(ocs->dev, PCIR_COMMAND, 2);
	val |= PCIM_CMD_INTxDIS;
	pci_write_config(ocs->dev, PCIR_COMMAND, val, 2);

	return FILTER_SCHEDULE_THREAD;
}

/**
 * @brief interrupt handler
 *
 * @param context pointer to the interrupt context
 */
static void
ocs_pci_intr(void *context)
{
	ocs_intr_ctx_t	*intr = context;
	struct ocs_softc *ocs = intr->softc;

	mtx_lock(&ocs->sim_lock);
	ocs_hw_process(&ocs->hw, intr->vec, OCS_OS_MAX_ISR_TIME_MSEC);
	mtx_unlock(&ocs->sim_lock);
}

/**
 * @brief Initialize DMA tag
 *
 * @param ocs the driver instance's software context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_init_dma_tag(struct ocs_softc *ocs)
{
	uint32_t	max_sgl = 0;
	uint32_t	max_sge = 0;

	/*
	 * IOs can't use the parent DMA tag and must create their
	 * own, based primarily on a restricted number of DMA segments.
	 * This is more of a BSD requirement than a SLI Port requirement
	 */
	ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl);
	ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge);

	if (bus_dma_tag_create(ocs->dmat,
				1,		/* byte alignment */
				0,		/* no boundary restrictions */
				BUS_SPACE_MAXADDR, /* no minimum low address */
				BUS_SPACE_MAXADDR, /* no maximum high address */
				NULL,		/* no filter function */
				NULL,		/* or arguments */
				BUS_SPACE_MAXSIZE, /* max size covered by tag */
				max_sgl,	/* segment count restrictions */
				max_sge,	/* segment length restrictions */
				0,		/* flags */
				NULL,		/* no lock manipulation function */
				NULL,		/* or arguments */
				&ocs->buf_dmat)) {
		device_printf(ocs->dev, "%s: bad bus_dma_tag_create(buf_dmat)\n", __func__);
		return -1;
	}
	return 0;
}

int32_t
ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len)
{
	return -1;
}

/**
 * @brief return pointer to ocs structure given instance index
 *
 * A pointer to an ocs structure is returned given an instance index.
 *
 * @param index index to ocs_devices array
 *
 * @return ocs pointer
 */

ocs_t *ocs_get_instance(uint32_t index)
{
	if (index < ARRAY_SIZE(ocs_devices)) {
		return ocs_devices[index];
	}
	return NULL;
}

/**
 * @brief Return the instance index of an opaque ocs structure
 *
 * Returns the ocs instance index.
 *
 * @param os pointer to ocs instance
 *
 * @return ocs instance index
 */
uint32_t
ocs_instance(void *os)
{
	ocs_t *ocs = os;

	return ocs->instance_index;
}

static device_method_t ocs_methods[] = {
	DEVMETHOD(device_probe,		ocs_pci_probe),
	DEVMETHOD(device_attach,	ocs_pci_attach),
	DEVMETHOD(device_detach,	ocs_pci_detach),
	DEVMETHOD(device_shutdown,	ocs_pci_shutdown),
	{0, 0}
};

static driver_t ocs_driver = {
	"ocs_fc",
	ocs_methods,
	sizeof(struct ocs_softc)
};

DRIVER_MODULE(ocs_fc, pci, ocs_driver, 0, 0);
MODULE_VERSION(ocs_fc, 1);
1009