/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

#include "opt_pvscsi.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/cam_periph.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/scsi/scsi_message.h>

#include "pvscsi.h"

#define	PVSCSI_DEFAULT_NUM_PAGES_REQ_RING	8
#define	PVSCSI_SENSE_LENGTH			256

MALLOC_DECLARE(M_PVSCSI);
MALLOC_DEFINE(M_PVSCSI, "pvscsi", "PVSCSI memory");

#ifdef PVSCSI_DEBUG_LOGGING
#define	DEBUG_PRINTF(level, dev, fmt, ...)				\
	do {								\
		if (pvscsi_log_level >= (level)) {			\
			device_printf((dev), (fmt), ##__VA_ARGS__);	\
		}							\
	} while (0)
#else
#define	DEBUG_PRINTF(level, dev, fmt, ...)
#endif /* PVSCSI_DEBUG_LOGGING */

#define	ccb_pvscsi_hcb	spriv_ptr0
#define	ccb_pvscsi_sc	spriv_ptr1

struct pvscsi_softc;
struct pvscsi_hcb;
struct pvscsi_dma;

static inline uint32_t pvscsi_reg_read(struct pvscsi_softc *sc,
    uint32_t offset);
static inline void pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset,
    uint32_t val);
static inline uint32_t pvscsi_read_intr_status(struct pvscsi_softc *sc);
static inline void pvscsi_write_intr_status(struct pvscsi_softc *sc,
    uint32_t val);
static inline void pvscsi_intr_enable(struct pvscsi_softc *sc);
static inline void pvscsi_intr_disable(struct pvscsi_softc *sc);
static void pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0);
static void pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    uint32_t len);
static uint32_t pvscsi_get_max_targets(struct pvscsi_softc *sc);
static int pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable);
static void pvscsi_setup_rings(struct pvscsi_softc *sc);
static void pvscsi_setup_msg_ring(struct pvscsi_softc *sc);
static int pvscsi_hw_supports_msg(struct pvscsi_softc *sc);

static void pvscsi_timeout(void *arg);
static void pvscsi_freeze(struct pvscsi_softc *sc);
static void pvscsi_adapter_reset(struct pvscsi_softc *sc);
static void pvscsi_bus_reset(struct pvscsi_softc *sc);
static void pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target);
static void pvscsi_abort(struct pvscsi_softc *sc, uint32_t target,
    union ccb *ccb);

static void pvscsi_process_completion(struct pvscsi_softc *sc,
    struct pvscsi_ring_cmp_desc *e);
static void pvscsi_process_cmp_ring(struct pvscsi_softc *sc);
static void pvscsi_process_msg(struct pvscsi_softc *sc,
    struct pvscsi_ring_msg_desc *e);
static void pvscsi_process_msg_ring(struct pvscsi_softc *sc);
static void pvscsi_rescan_callback(struct cam_periph *periph, union ccb *ccb);

static void pvscsi_intr_locked(struct pvscsi_softc *sc);
static void pvscsi_intr(void *xsc);
static void pvscsi_poll(struct cam_sim *sim);

static void pvscsi_execute_ccb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void pvscsi_action(struct cam_sim *sim, union ccb *ccb);

static inline uint64_t pvscsi_hcb_to_context(struct pvscsi_softc *sc,
    struct pvscsi_hcb *hcb);
static inline struct pvscsi_hcb *pvscsi_context_to_hcb(struct pvscsi_softc *sc,
    uint64_t context);
static struct pvscsi_hcb *pvscsi_hcb_get(struct pvscsi_softc *sc);
static void pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb);

static void pvscsi_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma);
static int pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    bus_size_t size, bus_size_t alignment);
static int pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc,
    struct pvscsi_dma *dma, uint64_t *ppn_list, uint32_t num_pages);
static void pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc,
    uint32_t hcbs_allocated);
static int pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc);
static void pvscsi_free_rings(struct pvscsi_softc *sc);
static int pvscsi_allocate_rings(struct pvscsi_softc *sc);
static void pvscsi_free_interrupts(struct pvscsi_softc *sc);
static int pvscsi_setup_interrupts(struct pvscsi_softc *sc);
static void pvscsi_free_all(struct pvscsi_softc *sc);

static int pvscsi_attach(device_t dev);
static int pvscsi_detach(device_t dev);
static int pvscsi_probe(device_t dev);
static int pvscsi_shutdown(device_t dev);
static int pvscsi_get_tunable(struct pvscsi_softc *sc, char *name, int value);

#ifdef PVSCSI_DEBUG_LOGGING
static int pvscsi_log_level = 0;
static SYSCTL_NODE(_hw, OID_AUTO, pvscsi, CTLFLAG_RD, 0,
    "PVSCSI driver parameters");
SYSCTL_INT(_hw_pvscsi, OID_AUTO, log_level, CTLFLAG_RW, &pvscsi_log_level,
    0, "PVSCSI debug log level");
TUNABLE_INT("hw.pvscsi.log_level", &pvscsi_log_level);
#endif

static int pvscsi_request_ring_pages = 0;
TUNABLE_INT("hw.pvscsi.request_ring_pages", &pvscsi_request_ring_pages);

static int pvscsi_use_msg = 1;
TUNABLE_INT("hw.pvscsi.use_msg", &pvscsi_use_msg);

static int pvscsi_use_msi = 1;
TUNABLE_INT("hw.pvscsi.use_msi", &pvscsi_use_msi);

#if 0 /* XXX swildner: MSI-X support */
static int pvscsi_use_msix = 1;
TUNABLE_INT("hw.pvscsi.use_msix", &pvscsi_use_msix);
#endif

static int pvscsi_use_req_call_threshold = 1;
TUNABLE_INT("hw.pvscsi.use_req_call_threshold", &pvscsi_use_req_call_threshold);

static int pvscsi_max_queue_depth = 0;
TUNABLE_INT("hw.pvscsi.max_queue_depth", &pvscsi_max_queue_depth);

struct pvscsi_sg_list {
	struct pvscsi_sg_element sge[PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT];
};

#define	PVSCSI_ABORT_TIMEOUT	2
#define	PVSCSI_RESET_TIMEOUT	10

#define	PVSCSI_HCB_NONE		0
#define	PVSCSI_HCB_ABORT	1
#define	PVSCSI_HCB_DEVICE_RESET	2
#define	PVSCSI_HCB_BUS_RESET	3

struct pvscsi_hcb {
	union ccb			*ccb;
	struct pvscsi_ring_req_desc	*e;
	int				 recovery;
	SLIST_ENTRY(pvscsi_hcb)		 links;

	struct callout			 callout;
	bus_dmamap_t			 dma_map;
	void				*sense_buffer;
	bus_addr_t			 sense_buffer_paddr;
	struct pvscsi_sg_list		*sg_list;
	bus_addr_t			 sg_list_paddr;
};

struct pvscsi_dma {
	bus_dma_tag_t	 tag;
	bus_dmamap_t	 map;
	void		*vaddr;
	bus_addr_t	 paddr;
	bus_size_t	 size;
};

struct pvscsi_softc {
	device_t		 dev;
	struct lock		 lock;
	struct cam_sim		*sim;
	struct cam_path		*bus_path;
	int			 frozen;
	struct pvscsi_rings_state	*rings_state;
	struct pvscsi_ring_req_desc	*req_ring;
	struct pvscsi_ring_cmp_desc	*cmp_ring;
	struct pvscsi_ring_msg_desc	*msg_ring;
	uint32_t		 hcb_cnt;
	struct pvscsi_hcb	*hcbs;
	SLIST_HEAD(, pvscsi_hcb)	free_list;
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		buffer_dmat;

	bool		 use_msg;
	uint32_t	 max_targets;
	int		 mm_rid;
	struct resource	*mm_res;
	int		 irq_id;
	int		 irq_type;
	struct resource	*irq_res;
	void		*irq_handler;
	int		 use_req_call_threshold;
	int		 use_msi_or_msix;

	uint64_t	rings_state_ppn;
	uint32_t	req_ring_num_pages;
	uint64_t	req_ring_ppn[PVSCSI_MAX_NUM_PAGES_REQ_RING];
	uint32_t	cmp_ring_num_pages;
	uint64_t	cmp_ring_ppn[PVSCSI_MAX_NUM_PAGES_CMP_RING];
	uint32_t	msg_ring_num_pages;
	uint64_t	msg_ring_ppn[PVSCSI_MAX_NUM_PAGES_MSG_RING];

	struct	pvscsi_dma rings_state_dma;
	struct	pvscsi_dma req_ring_dma;
	struct	pvscsi_dma cmp_ring_dma;
	struct	pvscsi_dma msg_ring_dma;

	struct	pvscsi_dma sg_list_dma;
	struct	pvscsi_dma sense_buffer_dma;
};

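/*
 * Fetch a per-device tunable of the form hw.pvscsi.<unit>.<name>,
 * falling back to the caller-supplied (typically global) default when
 * no per-unit value is set.
 */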
static int
pvscsi_get_tunable(struct pvscsi_softc *sc, char *name, int value)
{
	char cfg[64];

	ksnprintf(cfg, sizeof(cfg), "hw.pvscsi.%d.%s", device_get_unit(sc->dev),
	    name);
	TUNABLE_INT_FETCH(cfg, &value);

	return (value);
}

static void
pvscsi_freeze(struct pvscsi_softc *sc)
{

	if (!sc->frozen) {
		xpt_freeze_simq(sc->sim, 1);
		sc->frozen = 1;
	}
}

static inline uint32_t
pvscsi_reg_read(struct pvscsi_softc *sc, uint32_t offset)
{

	return (bus_read_4(sc->mm_res, offset));
}

static inline void
pvscsi_reg_write(struct pvscsi_softc *sc, uint32_t offset, uint32_t val)
{

	bus_write_4(sc->mm_res, offset, val);
}

static inline uint32_t
pvscsi_read_intr_status(struct pvscsi_softc *sc)
{

	return (pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_INTR_STATUS));
}

static inline void
pvscsi_write_intr_status(struct pvscsi_softc *sc, uint32_t val)
{

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static inline void
pvscsi_intr_enable(struct pvscsi_softc *sc)
{
	uint32_t mask;

	mask = PVSCSI_INTR_CMPL_MASK;
	if (sc->use_msg) {
		mask |= PVSCSI_INTR_MSG_MASK;
	}

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, mask);
}

static inline void
pvscsi_intr_disable(struct pvscsi_softc *sc)
{

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

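/*
 * Ring the doorbell for newly queued requests.  For READ/WRITE CDBs
 * the kick is suppressed while the number of requests outstanding on
 * the ring is below the negotiated request-call threshold; the intent
 * appears to be coalescing doorbell writes, since every register
 * access traps into the hypervisor.  Everything else kicks
 * unconditionally.
 */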
static void
pvscsi_kick_io(struct pvscsi_softc *sc, uint8_t cdb0)
{
	struct pvscsi_rings_state *s;

	if (cdb0 == READ_6 || cdb0 == READ_10 ||
	    cdb0 == READ_12 || cdb0 == READ_16 ||
	    cdb0 == WRITE_6 || cdb0 == WRITE_10 ||
	    cdb0 == WRITE_12 || cdb0 == WRITE_16) {
		s = sc->rings_state;

		if (!sc->use_req_call_threshold ||
		    (s->req_prod_idx - s->req_cons_idx) >=
		     s->req_call_threshold) {
			pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
		}
	} else {
		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
	}
}

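/*
 * Issue a device command: the command code goes to the COMMAND
 * register, followed by the descriptor streamed 32 bits at a time
 * through COMMAND_DATA.
 */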
static void
pvscsi_write_cmd(struct pvscsi_softc *sc, uint32_t cmd, void *data,
    uint32_t len)
{
	uint32_t *data_ptr;
	int i;

	KASSERT(len % sizeof(uint32_t) == 0,
	    ("command size not a multiple of 4"));

	data_ptr = data;
	len /= sizeof(uint32_t);

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; ++i) {
		pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND_DATA,
		    data_ptr[i]);
	}
}

static inline uint64_t
pvscsi_hcb_to_context(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
{

	/* Offset by 1 because context must not be 0 */
	return (hcb - sc->hcbs + 1);
}

static inline struct pvscsi_hcb *
pvscsi_context_to_hcb(struct pvscsi_softc *sc, uint64_t context)
{

	return (sc->hcbs + (context - 1));
}

static struct pvscsi_hcb *
pvscsi_hcb_get(struct pvscsi_softc *sc)
{
	struct pvscsi_hcb *hcb;

	KKASSERT(lockowned(&sc->lock));

	hcb = SLIST_FIRST(&sc->free_list);
	if (hcb) {
		SLIST_REMOVE_HEAD(&sc->free_list, links);
	}

	return (hcb);
}

static void
pvscsi_hcb_put(struct pvscsi_softc *sc, struct pvscsi_hcb *hcb)
{

	KKASSERT(lockowned(&sc->lock));
	hcb->ccb = NULL;
	hcb->e = NULL;
	hcb->recovery = PVSCSI_HCB_NONE;
	SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
}

static uint32_t
pvscsi_get_max_targets(struct pvscsi_softc *sc)
{
	uint32_t max_targets;

	pvscsi_write_cmd(sc, PVSCSI_CMD_GET_MAX_TARGETS, NULL, 0);

	max_targets = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	if (max_targets == ~0) {
		max_targets = 16;
	}

	return (max_targets);
}

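/*
 * Negotiate use of the request-call threshold.  Writing just the
 * command code and reading COMMAND_STATUS back probes for support
 * (all-ones means unimplemented); if supported, the full descriptor
 * is written to enable or disable the feature.
 */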
static int
pvscsi_setup_req_call(struct pvscsi_softc *sc, uint32_t enable)
{
	uint32_t status;
	struct pvscsi_cmd_desc_setup_req_call cmd;

	if (!pvscsi_get_tunable(sc, "use_req_call_threshold",
	    pvscsi_use_req_call_threshold)) {
		return (0);
	}

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
	    PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	if (status != -1) {
		bzero(&cmd, sizeof(cmd));
		cmd.enable = enable;
		pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
		    &cmd, sizeof(cmd));
		status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

		return (status != 0);
	} else {
		return (0);
	}
}

static void
pvscsi_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *dest;

	KASSERT(nseg == 1, ("more than one segment"));

	dest = arg;

	if (!error) {
		*dest = segs->ds_addr;
	}
}

static void
pvscsi_dma_free(struct pvscsi_softc *sc, struct pvscsi_dma *dma)
{

	if (dma->tag != NULL) {
		if (dma->paddr != 0) {
			bus_dmamap_unload(dma->tag, dma->map);
		}

		if (dma->vaddr != NULL) {
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		}

		bus_dma_tag_destroy(dma->tag);
	}

	bzero(dma, sizeof(*dma));
}

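/*
 * Allocate a physically contiguous DMA buffer.  The tag is created
 * with nsegments = 1, so pvscsi_dma_cb sees exactly one segment and
 * dma->paddr is the base address of the entire allocation.
 */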
static int
pvscsi_dma_alloc(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	bzero(dma, sizeof(*dma));

	error = bus_dma_tag_create(sc->parent_dmat, alignment, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size,
	    BUS_DMA_ALLOCNOW, &dma->tag);
	if (error) {
		device_printf(sc->dev, "error creating dma tag, error %d\n",
		    error);
		goto fail;
	}

	error = bus_dmamem_alloc(dma->tag, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dma->map);
	if (error) {
		device_printf(sc->dev, "error allocating dma mem, error %d\n",
		    error);
		goto fail;
	}

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    pvscsi_dma_cb, &dma->paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dev, "error mapping dma mem, error %d\n",
		    error);
		goto fail;
	}

	dma->size = size;

fail:
	if (error) {
		pvscsi_dma_free(sc, dma);
	}
	return (error);
}

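/*
 * Allocate page-aligned DMA memory and return it as a list of
 * physical page numbers (PPNs), the form in which the device expects
 * ring addresses.  The allocation is contiguous, so the PPNs are
 * consecutive, starting at paddr >> PAGE_SHIFT.
 */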
static int
pvscsi_dma_alloc_ppns(struct pvscsi_softc *sc, struct pvscsi_dma *dma,
    uint64_t *ppn_list, uint32_t num_pages)
{
	int error;
	uint32_t i;
	uint64_t ppn;

	error = pvscsi_dma_alloc(sc, dma, num_pages * PAGE_SIZE, PAGE_SIZE);
	if (error) {
		device_printf(sc->dev, "Error allocating pages, error %d\n",
		    error);
		return (error);
	}

	ppn = dma->paddr >> PAGE_SHIFT;
	for (i = 0; i < num_pages; i++) {
		ppn_list[i] = ppn + i;
	}

	return (0);
}

static void
pvscsi_dma_free_per_hcb(struct pvscsi_softc *sc, uint32_t hcbs_allocated)
{
	int i;
	int lock_owned;
	struct pvscsi_hcb *hcb;

	lock_owned = lockowned(&sc->lock);

	if (lock_owned) {
		lockmgr(&sc->lock, LK_RELEASE);
	}
	for (i = 0; i < hcbs_allocated; ++i) {
		hcb = sc->hcbs + i;
		callout_drain(&hcb->callout);
	}
	if (lock_owned) {
		lockmgr(&sc->lock, LK_EXCLUSIVE);
	}

	for (i = 0; i < hcbs_allocated; ++i) {
		hcb = sc->hcbs + i;
		bus_dmamap_destroy(sc->buffer_dmat, hcb->dma_map);
	}

	pvscsi_dma_free(sc, &sc->sense_buffer_dma);
	pvscsi_dma_free(sc, &sc->sg_list_dma);
}

static int
pvscsi_dma_alloc_per_hcb(struct pvscsi_softc *sc)
{
	int i;
	int error;
	struct pvscsi_hcb *hcb;

	i = 0;

	error = pvscsi_dma_alloc(sc, &sc->sg_list_dma,
	    sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1);
	if (error) {
		device_printf(sc->dev,
		    "Error allocating sg list DMA memory, error %d\n", error);
		goto fail;
	}

	error = pvscsi_dma_alloc(sc, &sc->sense_buffer_dma,
	    PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1);
	if (error) {
		device_printf(sc->dev,
		    "Error allocating sense buffer DMA memory, error %d\n",
		    error);
		goto fail;
	}

	for (i = 0; i < sc->hcb_cnt; ++i) {
		hcb = sc->hcbs + i;

		error = bus_dmamap_create(sc->buffer_dmat, 0, &hcb->dma_map);
		if (error) {
			device_printf(sc->dev,
			    "Error creating dma map for hcb %d, error %d\n",
			    i, error);
			goto fail;
		}

		hcb->sense_buffer =
		    (void *)((caddr_t)sc->sense_buffer_dma.vaddr +
		    PVSCSI_SENSE_LENGTH * i);
		hcb->sense_buffer_paddr =
		    sc->sense_buffer_dma.paddr + PVSCSI_SENSE_LENGTH * i;

		hcb->sg_list =
		    (struct pvscsi_sg_list *)((caddr_t)sc->sg_list_dma.vaddr +
		    sizeof(struct pvscsi_sg_list) * i);
		hcb->sg_list_paddr =
		    sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i;

		callout_init_lk(&hcb->callout, &sc->lock);
	}

	SLIST_INIT(&sc->free_list);
	for (i = (sc->hcb_cnt - 1); i >= 0; --i) {
		hcb = sc->hcbs + i;
		SLIST_INSERT_HEAD(&sc->free_list, hcb, links);
	}

fail:
	if (error) {
		pvscsi_dma_free_per_hcb(sc, i);
	}

	return (error);
}

static void
pvscsi_free_rings(struct pvscsi_softc *sc)
{

	pvscsi_dma_free(sc, &sc->rings_state_dma);
	pvscsi_dma_free(sc, &sc->req_ring_dma);
	pvscsi_dma_free(sc, &sc->cmp_ring_dma);
	if (sc->use_msg) {
		pvscsi_dma_free(sc, &sc->msg_ring_dma);
	}
}

static int
pvscsi_allocate_rings(struct pvscsi_softc *sc)
{
	int error;

	error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma,
	    &sc->rings_state_ppn, 1);
	if (error) {
		device_printf(sc->dev,
		    "Error allocating rings state, error = %d\n", error);
		goto fail;
	}
	sc->rings_state = sc->rings_state_dma.vaddr;

	error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn,
	    sc->req_ring_num_pages);
	if (error) {
		device_printf(sc->dev,
		    "Error allocating req ring pages, error = %d\n", error);
		goto fail;
	}
	sc->req_ring = sc->req_ring_dma.vaddr;

	error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn,
	    sc->cmp_ring_num_pages);
	if (error) {
		device_printf(sc->dev,
		    "Error allocating cmp ring pages, error = %d\n", error);
		goto fail;
	}
	sc->cmp_ring = sc->cmp_ring_dma.vaddr;

	sc->msg_ring = NULL;
	if (sc->use_msg) {
		error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma,
		    sc->msg_ring_ppn, sc->msg_ring_num_pages);
		if (error) {
			device_printf(sc->dev,
			    "Error allocating msg ring pages, error = %d\n",
			    error);
			goto fail;
		}
		sc->msg_ring = sc->msg_ring_dma.vaddr;
	}

	DEBUG_PRINTF(1, sc->dev, "rings_state: %p\n", sc->rings_state);
	DEBUG_PRINTF(1, sc->dev, "req_ring: %p - %u pages\n", sc->req_ring,
	    sc->req_ring_num_pages);
	DEBUG_PRINTF(1, sc->dev, "cmp_ring: %p - %u pages\n", sc->cmp_ring,
	    sc->cmp_ring_num_pages);
	DEBUG_PRINTF(1, sc->dev, "msg_ring: %p - %u pages\n", sc->msg_ring,
	    sc->msg_ring_num_pages);

fail:
	if (error) {
		pvscsi_free_rings(sc);
	}
	return (error);
}

static void
pvscsi_setup_rings(struct pvscsi_softc *sc)
{
	struct pvscsi_cmd_desc_setup_rings cmd;
	uint32_t i;

	bzero(&cmd, sizeof(cmd));

	cmd.rings_state_ppn = sc->rings_state_ppn;

	cmd.req_ring_num_pages = sc->req_ring_num_pages;
	for (i = 0; i < sc->req_ring_num_pages; ++i) {
		cmd.req_ring_ppns[i] = sc->req_ring_ppn[i];
	}

	cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages;
	for (i = 0; i < sc->cmp_ring_num_pages; ++i) {
		cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i];
	}

	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
}

static int
pvscsi_hw_supports_msg(struct pvscsi_softc *sc)
{
	uint32_t status;

	pvscsi_reg_write(sc, PVSCSI_REG_OFFSET_COMMAND,
	    PVSCSI_CMD_SETUP_MSG_RING);
	status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS);

	return (status != -1);
}

static void
pvscsi_setup_msg_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_cmd_desc_setup_msg_ring cmd;
	uint32_t i;

	KASSERT(sc->use_msg, ("msg is not being used"));

	bzero(&cmd, sizeof(cmd));

	cmd.num_pages = sc->msg_ring_num_pages;
	for (i = 0; i < sc->msg_ring_num_pages; ++i) {
		cmd.ring_ppns[i] = sc->msg_ring_ppn[i];
	}

	pvscsi_write_cmd(sc, PVSCSI_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
}

static void
pvscsi_adapter_reset(struct pvscsi_softc *sc)
{
	uint32_t val;

	device_printf(sc->dev, "Adapter Reset\n");

	pvscsi_write_cmd(sc, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
	val = pvscsi_read_intr_status(sc);

	DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val);
}

static void
pvscsi_bus_reset(struct pvscsi_softc *sc)
{

	device_printf(sc->dev, "Bus Reset\n");

	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_BUS, NULL, 0);
	pvscsi_process_cmp_ring(sc);

	DEBUG_PRINTF(2, sc->dev, "bus reset done\n");
}

static void
pvscsi_device_reset(struct pvscsi_softc *sc, uint32_t target)
{
	struct pvscsi_cmd_desc_reset_device cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.target = target;

	device_printf(sc->dev, "Device reset for target %u\n", target);

	pvscsi_write_cmd(sc, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof(cmd));
	pvscsi_process_cmp_ring(sc);

	DEBUG_PRINTF(2, sc->dev, "device reset done\n");
}

static void
pvscsi_abort(struct pvscsi_softc *sc, uint32_t target, union ccb *ccb)
{
	struct pvscsi_cmd_desc_abort_cmd cmd;
	struct pvscsi_hcb *hcb;
	uint64_t context;

	pvscsi_process_cmp_ring(sc);

	hcb = ccb->ccb_h.ccb_pvscsi_hcb;

	if (hcb != NULL) {
		context = pvscsi_hcb_to_context(sc, hcb);

		memset(&cmd, 0, sizeof(cmd));
		cmd.target = target;
		cmd.context = context;

		device_printf(sc->dev, "Abort for target %u context %llx\n",
		    target, (unsigned long long)context);

		pvscsi_write_cmd(sc, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
		pvscsi_process_cmp_ring(sc);

		DEBUG_PRINTF(2, sc->dev, "abort done\n");
	} else {
		DEBUG_PRINTF(1, sc->dev,
		    "Target %u ccb %p not found for abort\n", target, ccb);
	}
}

static int
pvscsi_probe(device_t dev)
{

	if (pci_get_vendor(dev) == PCI_VENDOR_ID_VMWARE &&
	    pci_get_device(dev) == PCI_DEVICE_ID_VMWARE_PVSCSI) {
		device_set_desc(dev, "VMware Paravirtual SCSI Controller");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

static int
pvscsi_shutdown(device_t dev)
{

	return (0);
}

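/*
 * Command timeout handler.  Recovery escalates each time the same
 * command times out again: first abort the command, then reset the
 * device, then the bus, and finally the whole adapter, freezing the
 * SIM queue from the second step onwards.
 */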
static void
pvscsi_timeout(void *arg)
{
	struct pvscsi_hcb *hcb;
	struct pvscsi_softc *sc;
	union ccb *ccb;

	hcb = arg;
	ccb = hcb->ccb;

	if (ccb == NULL) {
		/* Already completed */
		return;
	}

	sc = ccb->ccb_h.ccb_pvscsi_sc;
	KKASSERT(lockowned(&sc->lock));

	device_printf(sc->dev, "Command timed out hcb=%p ccb=%p.\n", hcb, ccb);

	switch (hcb->recovery) {
	case PVSCSI_HCB_NONE:
		hcb->recovery = PVSCSI_HCB_ABORT;
		pvscsi_abort(sc, ccb->ccb_h.target_id, ccb);
		callout_reset(&hcb->callout, (PVSCSI_ABORT_TIMEOUT * hz) / 1000,
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_ABORT:
		hcb->recovery = PVSCSI_HCB_DEVICE_RESET;
		pvscsi_freeze(sc);
		pvscsi_device_reset(sc, ccb->ccb_h.target_id);
		callout_reset(&hcb->callout, (PVSCSI_RESET_TIMEOUT * hz) / 1000,
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_DEVICE_RESET:
		hcb->recovery = PVSCSI_HCB_BUS_RESET;
		pvscsi_freeze(sc);
		pvscsi_bus_reset(sc);
		callout_reset(&hcb->callout, (PVSCSI_RESET_TIMEOUT * hz) / 1000,
		    pvscsi_timeout, hcb);
		break;
	case PVSCSI_HCB_BUS_RESET:
		pvscsi_freeze(sc);
		pvscsi_adapter_reset(sc);
		break;
	}
}

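/*
 * Translate one completion descriptor into CAM terms: sync and unload
 * the data buffer map, map the host adapter status (btstat) and SCSI
 * status (sdstat) onto a CAM status code, copy out autosense data on
 * a check condition, recycle the hcb, and complete the ccb.
 */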
static void
pvscsi_process_completion(struct pvscsi_softc *sc,
    struct pvscsi_ring_cmp_desc *e)
{
	struct pvscsi_hcb *hcb;
	union ccb *ccb;
	uint32_t status;
	uint32_t btstat;
	uint32_t sdstat;
	bus_dmasync_op_t op;

	hcb = pvscsi_context_to_hcb(sc, e->context);

	callout_stop(&hcb->callout);

	ccb = hcb->ccb;

	btstat = e->host_status;
	sdstat = e->scsi_status;

	ccb->csio.scsi_status = sdstat;
	ccb->csio.resid = ccb->csio.dxfer_len - e->data_len;

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			op = BUS_DMASYNC_POSTREAD;
		} else {
			op = BUS_DMASYNC_POSTWRITE;
		}
		bus_dmamap_sync(sc->buffer_dmat, hcb->dma_map, op);
		bus_dmamap_unload(sc->buffer_dmat, hcb->dma_map);
	}

	if (btstat == BTSTAT_SUCCESS && sdstat == SCSI_STATUS_OK) {
		DEBUG_PRINTF(3, sc->dev,
		    "completing command context %llx success\n",
		    (unsigned long long)e->context);
		ccb->csio.resid = 0;
		status = CAM_REQ_CMP;
	} else {
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			switch (sdstat) {
			case SCSI_STATUS_OK:
				ccb->csio.resid = 0;
				status = CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
				status = CAM_SCSI_STATUS_ERROR;

				if (ccb->csio.sense_len != 0) {
					status |= CAM_AUTOSNS_VALID;

					memset(&ccb->csio.sense_data, 0,
					    sizeof(ccb->csio.sense_data));
					memcpy(&ccb->csio.sense_data,
					    hcb->sense_buffer,
					    MIN(ccb->csio.sense_len,
						e->sense_len));
				}
				break;
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_QUEUE_FULL:
				status = CAM_REQUEUE_REQ;
				break;
			case SCSI_STATUS_CMD_TERMINATED:
			case SCSI_STATUS_TASK_ABORTED:
				status = CAM_REQ_ABORTED;
				break;
			default:
				DEBUG_PRINTF(1, sc->dev,
				    "ccb: %p sdstat=0x%x\n", ccb, sdstat);
				status = CAM_SCSI_STATUS_ERROR;
				break;
			}
			break;
		case BTSTAT_SELTIMEO:
			status = CAM_SEL_TIMEOUT;
			break;
		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			status = CAM_DATA_RUN_ERR;
			break;
		case BTSTAT_ABORTQUEUE:
		case BTSTAT_HATIMEOUT:
			status = CAM_REQUEUE_REQ;
			break;
		case BTSTAT_NORESPONSE:
		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			status = CAM_SCSI_BUS_RESET;
			break;
		case BTSTAT_SCSIPARITY:
			status = CAM_UNCOR_PARITY;
			break;
		case BTSTAT_BUSFREE:
			status = CAM_UNEXP_BUSFREE;
			break;
		case BTSTAT_INVPHASE:
			status = CAM_SEQUENCE_FAIL;
			break;
		case BTSTAT_SENSFAILED:
			status = CAM_AUTOSENSE_FAIL;
			break;
		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_DISCONNECT:
		case BTSTAT_BADMSG:
		case BTSTAT_INVPARAM:
			status = CAM_REQ_CMP_ERR;
			break;
		case BTSTAT_HASOFTWARE:
		case BTSTAT_HAHARDWARE:
			status = CAM_NO_HBA;
			break;
		default:
			device_printf(sc->dev, "unknown hba status: 0x%x\n",
			    btstat);
			status = CAM_NO_HBA;
			break;
		}

		DEBUG_PRINTF(3, sc->dev,
		    "completing command context %llx btstat %x sdstat %x - status %x\n",
		    (unsigned long long)e->context, btstat, sdstat, status);
	}

	ccb->ccb_h.ccb_pvscsi_hcb = NULL;
	ccb->ccb_h.ccb_pvscsi_sc = NULL;
	pvscsi_hcb_put(sc, hcb);

	ccb->ccb_h.status =
	    status | (ccb->ccb_h.status & ~(CAM_STATUS_MASK | CAM_SIM_QUEUED));

	if (sc->frozen) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sc->frozen = 0;
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}
	xpt_done(ccb);
}

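/*
 * Drain the completion ring.  Producer and consumer indices are
 * free-running; the ring slot is index & (2^cmp_num_entries_log2 - 1).
 * The fence ensures the descriptor is fully consumed before the
 * updated consumer index becomes visible to the device.
 */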
static void
pvscsi_process_cmp_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_ring_cmp_desc *ring;
	struct pvscsi_rings_state *s;
	struct pvscsi_ring_cmp_desc *e;
	uint32_t mask;

	KKASSERT(lockowned(&sc->lock));

	s = sc->rings_state;
	ring = sc->cmp_ring;
	mask = MASK(s->cmp_num_entries_log2);

	while (s->cmp_cons_idx != s->cmp_prod_idx) {
		e = ring + (s->cmp_cons_idx & mask);

		pvscsi_process_completion(sc, e);

		cpu_mfence();
		s->cmp_cons_idx++;
	}
}

static void
pvscsi_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	if (ccb->ccb_h.status != CAM_REQ_CMP)
		kprintf("%s: failure status = %x\n", __func__,
		    ccb->ccb_h.status);

	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(&ccb->ccb_h);
}

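/*
 * Handle an asynchronous message from the device.  Hot-plug
 * notifications (device added/removed) trigger a CAM rescan of the
 * affected LUN so the change propagates to the upper layers.
 */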
static void
pvscsi_process_msg(struct pvscsi_softc *sc, struct pvscsi_ring_msg_desc *e)
{
	struct pvscsi_ring_msg_dev_status_changed *desc;
	union ccb *ccb;

	switch (e->type) {
	case PVSCSI_MSG_DEV_ADDED:
	case PVSCSI_MSG_DEV_REMOVED: {
		desc = (struct pvscsi_ring_msg_dev_status_changed *)e;

		device_printf(sc->dev, "MSG: device %s at scsi%u:%u:%u\n",
		    desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal",
		    desc->bus, desc->target, desc->lun[1]);

		ccb = xpt_alloc_ccb();
		if (ccb == NULL) {
			device_printf(sc->dev,
			    "Error allocating CCB for dev change.\n");
			break;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
		    cam_sim_path(sc->sim), desc->target, desc->lun[1])
		    != CAM_REQ_CMP) {
			device_printf(sc->dev,
			    "Error creating path for dev change.\n");
			xpt_free_ccb(&ccb->ccb_h);
			break;
		}

		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*lowpri*/5);
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
		ccb->ccb_h.cbfcnp = pvscsi_rescan_callback;
		ccb->crcn.flags = CAM_FLAG_NONE;
		xpt_action(ccb);
	} break;
	default:
		device_printf(sc->dev, "Unknown msg type 0x%x\n", e->type);
	}
}

static void
pvscsi_process_msg_ring(struct pvscsi_softc *sc)
{
	struct pvscsi_ring_msg_desc *ring;
	struct pvscsi_rings_state *s;
	struct pvscsi_ring_msg_desc *e;
	uint32_t mask;

	KKASSERT(lockowned(&sc->lock));

	s = sc->rings_state;
	ring = sc->msg_ring;
	mask = MASK(s->msg_num_entries_log2);

	while (s->msg_cons_idx != s->msg_prod_idx) {
		e = ring + (s->msg_cons_idx & mask);

		pvscsi_process_msg(sc, e);

		cpu_mfence();
		s->msg_cons_idx++;
	}
}

static void
pvscsi_intr_locked(struct pvscsi_softc *sc)
{
	uint32_t val;

	KKASSERT(lockowned(&sc->lock));

	val = pvscsi_read_intr_status(sc);

	if ((val & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
		pvscsi_write_intr_status(sc, val & PVSCSI_INTR_ALL_SUPPORTED);
		pvscsi_process_cmp_ring(sc);
		if (sc->use_msg) {
			pvscsi_process_msg_ring(sc);
		}
	}
}

static void
pvscsi_intr(void *xsc)
{
	struct pvscsi_softc *sc;

	sc = xsc;

	KKASSERT(!lockowned(&sc->lock));

	lockmgr(&sc->lock, LK_EXCLUSIVE);
	pvscsi_intr_locked(xsc);
	lockmgr(&sc->lock, LK_RELEASE);
}

static void
pvscsi_poll(struct cam_sim *sim)
{
	struct pvscsi_softc *sc;

	sc = cam_sim_softc(sim);

	KKASSERT(lockowned(&sc->lock));
	pvscsi_intr_locked(sc);
}

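/*
 * bus_dmamap_load callback for a queued SCSI I/O.  Multi-segment
 * transfers point the request at the hcb's preallocated scatter/gather
 * list; single-segment transfers go directly into data_addr.  The
 * request is published by bumping req_prod_idx after a fence, then the
 * device is kicked.
 */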
static void
pvscsi_execute_ccb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct pvscsi_hcb *hcb;
	struct pvscsi_ring_req_desc *e;
	union ccb *ccb;
	struct pvscsi_softc *sc;
	struct pvscsi_rings_state *s;
	uint8_t cdb0;
	bus_dmasync_op_t op;

	hcb = arg;
	ccb = hcb->ccb;
	e = hcb->e;
	sc = ccb->ccb_h.ccb_pvscsi_sc;
	s = sc->rings_state;

	KKASSERT(lockowned(&sc->lock));

	if (error) {
		device_printf(sc->dev, "pvscsi_execute_ccb error %d\n", error);

		if (error == EFBIG) {
			ccb->ccb_h.status = CAM_REQ_TOO_BIG;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		}

		pvscsi_hcb_put(sc, hcb);
		xpt_done(ccb);
		return;
	}

	e->flags = 0;
	op = 0;
	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_NONE:
		e->flags |= PVSCSI_FLAG_CMD_DIR_NONE;
		break;
	case CAM_DIR_IN:
		e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST;
		op = BUS_DMASYNC_PREREAD;
		break;
	case CAM_DIR_OUT:
		e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE;
		op = BUS_DMASYNC_PREWRITE;
		break;
	}

	if (nseg != 0) {
		if (nseg > 1) {
			int i;
			struct pvscsi_sg_element *sge;

			KASSERT(nseg <= PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT,
			    ("too many sg segments"));

			sge = hcb->sg_list->sge;
			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;

			for (i = 0; i < nseg; ++i) {
				sge[i].addr = segs[i].ds_addr;
				sge[i].length = segs[i].ds_len;
				sge[i].flags = 0;
			}

			e->data_addr = hcb->sg_list_paddr;
		} else {
			e->data_addr = segs->ds_addr;
		}

		bus_dmamap_sync(sc->buffer_dmat, hcb->dma_map, op);
	} else {
		e->data_addr = 0;
	}

	cdb0 = e->cdb[0];
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		callout_reset(&hcb->callout, (ccb->ccb_h.timeout * hz) / 1000,
		    pvscsi_timeout, hcb);
	}

	cpu_mfence();
	s->req_prod_idx++;
	pvscsi_kick_io(sc, cdb0);
}

static void
pvscsi_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pvscsi_softc *sc;
	struct ccb_hdr *ccb_h;

	sc = cam_sim_softc(sim);
	ccb_h = &ccb->ccb_h;

	KKASSERT(lockowned(&sc->lock));

	switch (ccb_h->func_code) {
	case XPT_SCSI_IO:
	{
		struct ccb_scsiio *csio;
		uint32_t req_num_entries_log2;
		struct pvscsi_ring_req_desc *ring;
		struct pvscsi_ring_req_desc *e;
		struct pvscsi_rings_state *s;
		struct pvscsi_hcb *hcb;

		csio = &ccb->csio;
		ring = sc->req_ring;
		s = sc->rings_state;

		hcb = NULL;

		/*
		 * Check if it was completed already (such as aborted
		 * by upper layers)
		 */
		if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			xpt_done(ccb);
			return;
		}

		req_num_entries_log2 = s->req_num_entries_log2;

		if (s->req_prod_idx - s->cmp_cons_idx >=
		    (1 << req_num_entries_log2)) {
			device_printf(sc->dev,
			    "Not enough room on completion ring.\n");
			pvscsi_freeze(sc);
			ccb_h->status = CAM_REQUEUE_REQ;
			goto finish_ccb;
		}

		hcb = pvscsi_hcb_get(sc);
		if (hcb == NULL) {
			device_printf(sc->dev, "No free hcbs.\n");
			pvscsi_freeze(sc);
			ccb_h->status = CAM_REQUEUE_REQ;
			goto finish_ccb;
		}

		hcb->ccb = ccb;
		ccb_h->ccb_pvscsi_hcb = hcb;
		ccb_h->ccb_pvscsi_sc = sc;

		if (csio->cdb_len > sizeof(e->cdb)) {
			DEBUG_PRINTF(2, sc->dev, "cdb length %u too large\n",
			    csio->cdb_len);
			ccb_h->status = CAM_REQ_INVALID;
			goto finish_ccb;
		}

		if (ccb_h->flags & CAM_CDB_PHYS) {
			DEBUG_PRINTF(2, sc->dev,
			    "CAM_CDB_PHYS not implemented\n");
			ccb_h->status = CAM_REQ_INVALID;
			goto finish_ccb;
		}

		e = ring + (s->req_prod_idx & MASK(req_num_entries_log2));

		e->bus = cam_sim_bus(sim);
		e->target = ccb_h->target_id;
		memset(e->lun, 0, sizeof(e->lun));
		e->lun[1] = ccb_h->target_lun;
		e->data_addr = 0;
		e->data_len = csio->dxfer_len;
		e->vcpu_hint = mycpuid;

		e->cdb_len = csio->cdb_len;
		if (csio->ccb_h.flags & CAM_CDB_POINTER) {
			memcpy(e->cdb, ccb->csio.cdb_io.cdb_ptr,
			    csio->cdb_len);
		} else {
			memcpy(e->cdb, ccb->csio.cdb_io.cdb_bytes,
			    csio->cdb_len);
		}

		e->sense_addr = 0;
		e->sense_len = csio->sense_len;
		if (e->sense_len > 0) {
			e->sense_addr = hcb->sense_buffer_paddr;
		}

		e->tag = MSG_SIMPLE_Q_TAG;
		if (ccb_h->flags & CAM_TAG_ACTION_VALID) {
			e->tag = csio->tag_action;
		}

		e->context = pvscsi_hcb_to_context(sc, hcb);
		hcb->e = e;

		DEBUG_PRINTF(3, sc->dev,
		    " queuing command %02x context %llx\n", e->cdb[0],
		    (unsigned long long)e->context);
		bus_dmamap_load_ccb(sc->buffer_dmat, hcb->dma_map, ccb,
		    pvscsi_execute_ccb, hcb, 0);
		break;

finish_ccb:
		if (hcb != NULL) {
			pvscsi_hcb_put(sc, hcb);
		}
		xpt_done(ccb);
	} break;
	case XPT_ABORT:
	{
		struct pvscsi_hcb *abort_hcb;
		union ccb *abort_ccb;

		abort_ccb = ccb->cab.abort_ccb;
		abort_hcb = abort_ccb->ccb_h.ccb_pvscsi_hcb;

		if (abort_hcb->ccb != NULL && abort_hcb->ccb == abort_ccb) {
			if (abort_ccb->ccb_h.func_code == XPT_SCSI_IO) {
				pvscsi_abort(sc, ccb_h->target_id, abort_ccb);
				ccb_h->status = CAM_REQ_CMP;
			} else {
				ccb_h->status = CAM_UA_ABORT;
			}
		} else {
			device_printf(sc->dev,
			    "Could not find hcb for ccb %p (tgt %u)\n",
			    ccb, ccb_h->target_id);
			ccb_h->status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
	} break;
	case XPT_RESET_DEV:
	{
		pvscsi_device_reset(sc, ccb_h->target_id);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
	} break;
	case XPT_RESET_BUS:
	{
		pvscsi_bus_reset(sc);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
	} break;
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi;

		cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		/* cpi->vuhba_flags = 0; */
		cpi->max_target = sc->max_targets;
		cpi->max_lun = 0;
		cpi->async_flags = 0;
		cpi->hpath_id = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = 7;
		cpi->base_transfer_speed = 750000;
		strlcpy(cpi->sim_vid, "VMware", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "VMware", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		/* Limit I/O to 256k since we can't do 512k unaligned I/O */
		cpi->maxio = (PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT / 2) * PAGE_SIZE;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC2;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;

		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
	} break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;

		cts = &ccb->cts;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;

		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
	} break;
	case XPT_CALC_GEOMETRY:
	{
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
	} break;
	default:
		ccb_h->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
pvscsi_free_interrupts(struct pvscsi_softc *sc)
{

	if (sc->irq_handler != NULL) {
		bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handler);
	}
	if (sc->irq_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_id,
		    sc->irq_res);
	}
	if (sc->use_msi_or_msix) {
		pci_release_msi(sc->dev);
	}
}

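/*
 * Allocate and wire up the device interrupt.  pci_alloc_1intr() hands
 * back a single MSI when requested and available, and a legacy INTx
 * vector otherwise (MSI-X support is stubbed out above).
 */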
static int
pvscsi_setup_interrupts(struct pvscsi_softc *sc)
{
	int error;
	int flags;
#if 0 /* XXX swildner: MSI-X support */
	int use_msix;
#endif
	int use_msi;
#if 0 /* XXX swildner: MSI-X support */
	int count;
#endif

	sc->use_msi_or_msix = 0;

#if 0 /* XXX swildner: MSI-X support */
	use_msix = pvscsi_get_tunable(sc, "use_msix", pvscsi_use_msix);
#endif
	use_msi = pvscsi_get_tunable(sc, "use_msi", pvscsi_use_msi);

#if 0 /* XXX swildner: MSI-X support */
	if (use_msix && pci_msix_count(sc->dev) > 0) {
		count = 1;
		if (pci_alloc_msix(sc->dev, &count) == 0 && count == 1) {
			sc->use_msi_or_msix = 1;
			device_printf(sc->dev, "Interrupt: MSI-X\n");
		} else {
			pci_release_msi(sc->dev);
		}
	}
#endif

	sc->irq_id = 0;
	sc->irq_type = pci_alloc_1intr(sc->dev, use_msi, &sc->irq_id,
	    &flags);
	if (sc->irq_type == PCI_INTR_TYPE_MSI) {
		sc->use_msi_or_msix = 1;
		device_printf(sc->dev, "Interrupt: MSI\n");
	} else {
		device_printf(sc->dev, "Interrupt: INT\n");
	}

	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_id,
	    flags);
	if (sc->irq_res == NULL) {
		device_printf(sc->dev, "IRQ allocation failed\n");
		if (sc->use_msi_or_msix) {
			pci_release_msi(sc->dev);
		}
		return (ENXIO);
	}

	error = bus_setup_intr(sc->dev, sc->irq_res,
	    INTR_MPSAFE, pvscsi_intr, sc,
	    &sc->irq_handler, NULL);
	if (error) {
		device_printf(sc->dev, "IRQ handler setup failed\n");
		pvscsi_free_interrupts(sc);
		return (error);
	}

	return (0);
}

static void
pvscsi_free_all(struct pvscsi_softc *sc)
{

	if (sc->sim) {
		int32_t status;

		if (sc->bus_path) {
			xpt_free_path(sc->bus_path);
		}

		status = xpt_bus_deregister(cam_sim_path(sc->sim));
		if (status != CAM_REQ_CMP) {
			device_printf(sc->dev,
			    "Error deregistering bus, status=%d\n", status);
		}

		cam_sim_free(sc->sim);
	}

	pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt);

	if (sc->hcbs) {
		kfree(sc->hcbs, M_PVSCSI);
	}

	pvscsi_free_rings(sc);

	pvscsi_free_interrupts(sc);

	if (sc->buffer_dmat != NULL) {
		bus_dma_tag_destroy(sc->buffer_dmat);
	}

	if (sc->parent_dmat != NULL) {
		bus_dma_tag_destroy(sc->parent_dmat);
	}

	if (sc->mm_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->mm_rid,
		    sc->mm_res);
	}
}

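/*
 * Attach: map the first usable memory BAR, create the parent and data
 * buffer DMA tags, set up the interrupt, size the rings (8 request
 * ring pages by default for up to 16 targets, the maximum otherwise),
 * allocate rings and per-command state, register the SIM with CAM,
 * and only then hand the rings to the device and enable interrupts.
 */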
static int
pvscsi_attach(device_t dev)
{
	struct pvscsi_softc *sc;
	int rid;
	int barid;
	int error;
	int max_queue_depth;
	int adapter_queue_size;
	struct cam_devq *devq;

	sc = device_get_softc(dev);
	sc->dev = dev;

	lockinit(&sc->lock, "pvscsi", 0, LK_CANRECURSE);

	pci_enable_busmaster(dev);

	sc->mm_rid = -1;
	for (barid = 0; barid <= PCIR_MAX_BAR_0; ++barid) {
		rid = PCIR_BAR(barid);

		sc->mm_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
		if (sc->mm_res != NULL) {
			sc->mm_rid = rid;
			break;
		}
	}

	if (sc->mm_res == NULL) {
		device_printf(dev, "could not map device memory\n");
		return (ENXIO);
	}

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0,
	    &sc->parent_dmat);
	if (error) {
		device_printf(dev, "parent dma tag create failure, error %d\n",
		    error);
		pvscsi_free_all(sc);
		return (ENXIO);
	}

	error = bus_dma_tag_create(sc->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT * PAGE_SIZE,
	    PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT, PAGE_SIZE, BUS_DMA_ALLOCNOW,
	    &sc->buffer_dmat);
	if (error) {
		device_printf(dev, "buffer dma tag create failure, error %d\n",
		    error);
		pvscsi_free_all(sc);
		return (ENXIO);
	}

	error = pvscsi_setup_interrupts(sc);
	if (error) {
		device_printf(dev, "Interrupt setup failed\n");
		pvscsi_free_all(sc);
		return (error);
	}

	sc->max_targets = pvscsi_get_max_targets(sc);

	sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) &&
	    pvscsi_hw_supports_msg(sc);
	sc->msg_ring_num_pages = sc->use_msg ? 1 : 0;

	sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages",
	    pvscsi_request_ring_pages);
	if (sc->req_ring_num_pages <= 0) {
		if (sc->max_targets <= 16) {
			sc->req_ring_num_pages =
			    PVSCSI_DEFAULT_NUM_PAGES_REQ_RING;
		} else {
			sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
		}
	} else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) {
		sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING;
	}
	sc->cmp_ring_num_pages = sc->req_ring_num_pages;

	max_queue_depth = pvscsi_get_tunable(sc, "max_queue_depth",
	    pvscsi_max_queue_depth);

	adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) /
	    sizeof(struct pvscsi_ring_req_desc);
	if (max_queue_depth > 0) {
		adapter_queue_size = MIN(adapter_queue_size, max_queue_depth);
	}
	adapter_queue_size = MIN(adapter_queue_size,
	    PVSCSI_MAX_REQ_QUEUE_DEPTH);

	device_printf(sc->dev, "Use Msg: %d\n", sc->use_msg);
	device_printf(sc->dev, "REQ num pages: %d\n", sc->req_ring_num_pages);
	device_printf(sc->dev, "CMP num pages: %d\n", sc->cmp_ring_num_pages);
	device_printf(sc->dev, "MSG num pages: %d\n", sc->msg_ring_num_pages);
	device_printf(sc->dev, "Queue size: %d\n", adapter_queue_size);

	if (pvscsi_allocate_rings(sc)) {
		device_printf(dev, "ring allocation failed\n");
		pvscsi_free_all(sc);
		return (ENXIO);
	}

	sc->hcb_cnt = adapter_queue_size;
	sc->hcbs = kmalloc(sc->hcb_cnt * sizeof(*sc->hcbs), M_PVSCSI,
	    M_NOWAIT | M_ZERO);
	if (sc->hcbs == NULL) {
		device_printf(dev, "error allocating hcb array\n");
		pvscsi_free_all(sc);
		return (ENXIO);
	}

	if (pvscsi_dma_alloc_per_hcb(sc)) {
		device_printf(dev, "error allocating per hcb dma memory\n");
		pvscsi_free_all(sc);
		return (ENXIO);
	}

	pvscsi_adapter_reset(sc);

	devq = cam_simq_alloc(adapter_queue_size);
	if (devq == NULL) {
		device_printf(dev, "cam devq alloc failed\n");
		pvscsi_free_all(sc);
		return (ENXIO);
	}

	sc->sim = cam_sim_alloc(pvscsi_action, pvscsi_poll, "pvscsi", sc,
	    device_get_unit(dev), &sc->lock, 1, adapter_queue_size, devq);
	cam_simq_release(devq);
	if (sc->sim == NULL) {
		device_printf(dev, "cam sim alloc failed\n");
		pvscsi_free_all(sc);
		return (ENXIO);
	}

	lockmgr(&sc->lock, LK_EXCLUSIVE);

	if (xpt_bus_register(sc->sim, 0) != CAM_SUCCESS) {
		device_printf(dev, "xpt bus register failed\n");
		pvscsi_free_all(sc);
		lockmgr(&sc->lock, LK_RELEASE);
		return (ENXIO);
	}

	if (xpt_create_path(&sc->bus_path, NULL, cam_sim_path(sc->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "xpt create path failed\n");
		pvscsi_free_all(sc);
		lockmgr(&sc->lock, LK_RELEASE);
		return (ENXIO);
	}

	pvscsi_setup_rings(sc);
	if (sc->use_msg) {
		pvscsi_setup_msg_ring(sc);
	}

	sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1);

	pvscsi_intr_enable(sc);

	lockmgr(&sc->lock, LK_RELEASE);

	return (0);
}

static int
pvscsi_detach(device_t dev)
{
	struct pvscsi_softc *sc;

	sc = device_get_softc(dev);

	pvscsi_intr_disable(sc);
	pvscsi_adapter_reset(sc);

	lockmgr(&sc->lock, LK_EXCLUSIVE);
	pvscsi_free_all(sc);
	lockmgr(&sc->lock, LK_RELEASE);

	lockuninit(&sc->lock);

	return (0);
}

static device_method_t pvscsi_methods[] = {
	DEVMETHOD(device_probe, pvscsi_probe),
	DEVMETHOD(device_shutdown, pvscsi_shutdown),
	DEVMETHOD(device_attach, pvscsi_attach),
	DEVMETHOD(device_detach, pvscsi_detach),
	DEVMETHOD_END
};

static driver_t pvscsi_driver = {
	"pvscsi", pvscsi_methods, sizeof(struct pvscsi_softc)
};

static devclass_t pvscsi_devclass;
DRIVER_MODULE(pvscsi, pci, pvscsi_driver, pvscsi_devclass, NULL, NULL);
MODULE_VERSION(pvscsi, 1);

MODULE_DEPEND(pvscsi, pci, 1, 1, 1);
MODULE_DEPEND(pvscsi, cam, 1, 1, 1);
1819