xref: /openbsd/sys/dev/pci/vmwpvs.c (revision a6445c1d)
1 /*	$OpenBSD: vmwpvs.c,v 1.10 2014/07/13 23:10:23 deraadt Exp $ */
2 
3 /*
4  * Copyright (c) 2013 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/buf.h>
22 #include <sys/device.h>
23 #include <sys/ioctl.h>
24 #include <sys/malloc.h>
25 #include <sys/kernel.h>
26 #include <sys/rwlock.h>
27 #include <sys/dkio.h>
28 #include <sys/task.h>
29 
30 #include <machine/bus.h>
31 
32 #include <dev/pci/pcireg.h>
33 #include <dev/pci/pcivar.h>
34 #include <dev/pci/pcidevs.h>
35 
36 #include <scsi/scsi_all.h>
37 #include <scsi/scsi_message.h>
38 #include <scsi/scsiconf.h>
39 
40 /* pushbuttons */
41 #define VMWPVS_OPENINGS		64 /* according to the linux driver */
42 #define VMWPVS_RING_PAGES	2
43 #define VMWPVS_MAXSGL		(MAXPHYS / PAGE_SIZE)
44 #define VMWPVS_SENSELEN		roundup(sizeof(struct scsi_sense_data), 16)
45 
46 /* "chip" definitions */
47 
48 #define VMWPVS_R_COMMAND	0x0000
49 #define VMWPVS_R_COMMAND_DATA	0x0004
50 #define VMWPVS_R_COMMAND_STATUS	0x0008
51 #define VMWPVS_R_LAST_STS_0	0x0100
52 #define VMWPVS_R_LAST_STS_1	0x0104
53 #define VMWPVS_R_LAST_STS_2	0x0108
54 #define VMWPVS_R_LAST_STS_3	0x010c
55 #define VMWPVS_R_INTR_STATUS	0x100c
56 #define VMWPVS_R_INTR_MASK	0x2010
57 #define VMWPVS_R_KICK_NON_RW_IO	0x3014
58 #define VMWPVS_R_DEBUG		0x3018
59 #define VMWPVS_R_KICK_RW_IO	0x4018
60 
#define VMWPVS_INTR_CMPL_0	(1 << 0)
#define VMWPVS_INTR_CMPL_1	(1 << 1)
#define VMWPVS_INTR_CMPL_MASK	(VMWPVS_INTR_CMPL_0 | VMWPVS_INTR_CMPL_1)
#define VMWPVS_INTR_MSG_0	(1 << 2)
#define VMWPVS_INTR_MSG_1	(1 << 3)
/*
 * Fixed: the mask previously or'd VMWPVS_INTR_MSG_0 with itself, leaving
 * bit 3 (VMWPVS_INTR_MSG_1) out of both the message mask and the
 * all-interrupts mask, so MSG_1 interrupts were neither enabled nor
 * recognised in vmwpvs_intx().
 */
#define VMWPVS_INTR_MSG_MASK	(VMWPVS_INTR_MSG_0 | VMWPVS_INTR_MSG_1)
#define VMWPVS_INTR_ALL_MASK	(VMWPVS_INTR_CMPL_MASK | VMWPVS_INTR_MSG_MASK)
68 
69 #define VMWPVS_PAGE_SHIFT	12
70 #define VMWPVS_PAGE_SIZE	(1 << VMWPVS_PAGE_SHIFT)
71 
72 #define VMWPVS_NPG_COMMAND	1
73 #define VMWPVS_NPG_INTR_STATUS	1
74 #define VMWPVS_NPG_MISC		2
75 #define VMWPVS_NPG_KICK_IO	2
76 #define VMWPVS_NPG_MSI_X	2
77 
78 #define VMWPVS_PG_COMMAND	0
79 #define VMWPVS_PG_INTR_STATUS	(VMWPVS_PG_COMMAND + \
80 				    VMWPVS_NPG_COMMAND * VMWPVS_PAGE_SIZE)
81 #define VMWPVS_PG_MISC		(VMWPVS_PG_INTR_STATUS + \
82 				    VMWPVS_NPG_INTR_STATUS * VMWPVS_PAGE_SIZE)
83 #define VMWPVS_PG_KICK_IO	(VMWPVS_PG_MISC + \
84 				    VMWPVS_NPG_MISC * VMWPVS_PAGE_SIZE)
85 #define VMWPVS_PG_MSI_X		(VMWPVS_PG_KICK_IO + \
86 				    VMWPVS_NPG_KICK_IO * VMWPVS_PAGE_SIZE)
87 #define VMMPVS_PG_LEN		(VMWPVS_PG_MSI_X + \
88 				    VMWPVS_NPG_MSI_X * VMWPVS_PAGE_SIZE)
89 
/*
 * Ring state page shared with the device: free-running producer and
 * consumer indices for the request, completion and message rings.
 * Users take the indices modulo the ring entry count when indexing
 * (see vmwpvs_scsi_cmd() and vmwpvs_intr()).
 * NOTE(review): the tag is spelled "vmwpvw" (sic); it is referenced
 * with that spelling throughout the file, so it is kept as-is.
 */
struct vmwpvw_ring_state {
	u_int32_t		req_prod;	/* written by the driver */
	u_int32_t		req_cons;	/* written by the device */
	u_int32_t		req_entries; /* log 2 */

	u_int32_t		cmp_prod;	/* written by the device */
	u_int32_t		cmp_cons;	/* written by the driver */
	u_int32_t		cmp_entries; /* log 2 */

	u_int32_t		__reserved[26];

	u_int32_t		msg_prod;	/* written by the device */
	u_int32_t		msg_cons;	/* written by the driver */
	u_int32_t		msg_entries; /* log 2 */
} __packed;
105 
/*
 * Request ring entry: one SCSI command handed to the device.  The
 * context value is echoed back in the matching completion entry;
 * the driver stores the ccb index in its low 32 bits (see attach).
 */
struct vmwpvs_ring_req {
	u_int64_t		context;

	u_int64_t		data_addr;	/* buffer dva, or sgl dva if REQ_SGL */
	u_int64_t		data_len;

	u_int64_t		sense_addr;
	u_int32_t		sense_len;

	u_int32_t		flags;
#define VMWPVS_REQ_SGL			(1 << 0) /* data_addr points at an sgl */
#define VMWPVS_REQ_OOBCDB		(1 << 1)
#define VMWPVS_REQ_DIR_NONE		(1 << 2)
#define VMWPVS_REQ_DIR_IN		(1 << 3)
#define VMWPVS_REQ_DIR_OUT		(1 << 4)

	u_int8_t		cdb[16];
	u_int8_t		cdblen;
	u_int8_t		lun[8];
	u_int8_t		tag;
	u_int8_t		bus;
	u_int8_t		target;
	u_int8_t		vcpu_hint;

	u_int8_t		__reserved[59];	/* pads the entry to 128 bytes */
} __packed;
/* number of entries that fit in the fixed-size request ring */
#define VMWPVS_REQ_COUNT	((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
				    sizeof(struct vmwpvs_ring_req))
134 
/*
 * Completion ring entry.  context identifies the originating request;
 * data_len is used to compute the residual on under/overrun (see
 * vmwpvs_scsi_cmd_done()).
 */
struct vmwpvs_ring_cmp {
	u_int64_t		context;
	u_int64_t		data_len;
	u_int32_t		sense_len;
	u_int16_t		host_status;	/* VMWPVS_HOST_STATUS_* */
	u_int16_t		scsi_status;	/* VMWPVS_SCSI_STATUS_* */
	u_int32_t		__reserved[2];
} __packed;
/* number of entries that fit in the fixed-size completion ring */
#define VMWPVS_CMP_COUNT	((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
				    sizeof(struct vmwpvs_ring_cmp))
145 
146 struct vmwpvs_sge {
147 	u_int64_t		addr;
148 	u_int32_t		len;
149 	u_int32_t		flags;
150 } __packed;
151 
152 struct vmwpvs_ring_msg {
153 	u_int32_t		type;
154 	u_int32_t		__args[31];
155 } __packed;
156 #define VMWPVS_MSG_COUNT	((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
157 				    sizeof(struct vmwpvs_ring_msg))
158 
159 #define VMWPVS_MSG_T_ADDED	0
160 #define VMWPVS_MSG_T_REMOVED	1
161 
162 struct vmwpvs_ring_msg_dev {
163 	u_int32_t		type;
164 	u_int32_t		bus;
165 	u_int32_t		target;
166 	u_int8_t		lun[8];
167 
168 	u_int32_t		__pad[27];
169 } __packed;
170 
171 struct vmwpvs_cfg_cmd {
172 	u_int64_t		cmp_addr;
173 	u_int32_t		pg_addr;
174 	u_int32_t		pg_addr_type;
175 	u_int32_t		pg_num;
176 	u_int32_t		__reserved;
177 } __packed;
178 
179 #define VMWPVS_MAX_RING_PAGES		32
180 struct vmwpvs_setup_rings_cmd {
181 	u_int32_t		req_pages;
182 	u_int32_t		cmp_pages;
183 	u_int64_t		state_ppn;
184 	u_int64_t		req_page_ppn[VMWPVS_MAX_RING_PAGES];
185 	u_int64_t		cmp_page_ppn[VMWPVS_MAX_RING_PAGES];
186 } __packed;
187 
188 #define VMWPVS_MAX_MSG_RING_PAGES	16
189 struct vmwpvs_setup_rings_msg {
190 	u_int32_t		msg_pages;
191 	u_int32_t		__reserved;
192 	u_int64_t		msg_page_ppn[VMWPVS_MAX_MSG_RING_PAGES];
193 } __packed;
194 
195 #define VMWPVS_CMD_FIRST		0
196 #define VMWPVS_CMD_ADAPTER_RESET	1
197 #define VMWPVS_CMD_ISSUE_SCSI		2
198 #define VMWPVS_CMD_SETUP_RINGS		3
199 #define VMWPVS_CMD_RESET_BUS		4
200 #define VMWPVS_CMD_RESET_DEVICE		5
201 #define VMWPVS_CMD_ABORT_CMD		6
202 #define VMWPVS_CMD_CONFIG		7
203 #define VMWPVS_CMD_SETUP_MSG_RING	8
204 #define VMWPVS_CMD_DEVICE_UNPLUG	9
205 #define VMWPVS_CMD_LAST			10
206 
207 #define VMWPVS_CFGPG_CONTROLLER		0x1958
208 #define VMWPVS_CFGPG_PHY		0x1959
209 #define VMWPVS_CFGPG_DEVICE		0x195a
210 
211 #define VMWPVS_CFGPGADDR_CONTROLLER	0x2120
212 #define VMWPVS_CFGPGADDR_TARGET		0x2121
213 #define VMWPVS_CFGPGADDR_PHY		0x2122
214 
215 struct vmwpvs_cfg_pg_header {
216 	u_int32_t		pg_num;
217 	u_int16_t		num_dwords;
218 	u_int16_t		host_status;
219 	u_int16_t		scsi_status;
220 	u_int16_t		__reserved[3];
221 } __packed;
222 
223 #define VMWPVS_HOST_STATUS_SUCCESS	0x00
224 #define VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED 0x0a
225 #define VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED_WITH_FLAG 0x0b
226 #define VMWPVS_HOST_STATUS_UNDERRUN	0x0c
227 #define VMWPVS_HOST_STATUS_SELTIMEOUT	0x11
228 #define VMWPVS_HOST_STATUS_DATARUN	0x12
229 #define VMWPVS_HOST_STATUS_BUSFREE	0x13
230 #define VMWPVS_HOST_STATUS_INVPHASE	0x14
231 #define VMWPVS_HOST_STATUS_LUNMISMATCH	0x17
232 #define VMWPVS_HOST_STATUS_INVPARAM	0x1a
233 #define VMWPVS_HOST_STATUS_SENSEFAILED	0x1b
234 #define VMWPVS_HOST_STATUS_TAGREJECT	0x1c
235 #define VMWPVS_HOST_STATUS_BADMSG	0x1d
236 #define VMWPVS_HOST_STATUS_HAHARDWARE	0x20
237 #define VMWPVS_HOST_STATUS_NORESPONSE	0x21
238 #define VMWPVS_HOST_STATUS_SENT_RST	0x22
239 #define VMWPVS_HOST_STATUS_RECV_RST	0x23
240 #define VMWPVS_HOST_STATUS_DISCONNECT	0x24
241 #define VMWPVS_HOST_STATUS_BUS_RESET	0x25
242 #define VMWPVS_HOST_STATUS_ABORT_QUEUE	0x26
243 #define VMWPVS_HOST_STATUS_HA_SOFTWARE	0x27
244 #define VMWPVS_HOST_STATUS_HA_TIMEOUT	0x30
245 #define VMWPVS_HOST_STATUS_SCSI_PARITY	0x34
246 
247 #define VMWPVS_SCSI_STATUS_OK		0x00
248 #define VMWPVS_SCSI_STATUS_CHECK	0x02
249 
250 struct vmwpvs_cfg_pg_controller {
251 	struct vmwpvs_cfg_pg_header header;
252 
253 	u_int64_t		wwnn;
254 	u_int16_t		manufacturer[64];
255 	u_int16_t		serial_number[64];
256 	u_int16_t		oprom_version[32];
257 	u_int16_t		hardware_version[32];
258 	u_int16_t		firmware_version[32];
259 	u_int32_t		num_phys;
260 	u_int8_t		use_consec_phy_wwns;
261 	u_int8_t		__reserved[3];
262 } __packed;
263 
264 /* driver stuff */
265 
266 struct vmwpvs_dmamem {
267 	bus_dmamap_t		dm_map;
268 	bus_dma_segment_t	dm_seg;
269 	size_t			dm_size;
270 	caddr_t			dm_kva;
271 };
272 #define VMWPVS_DMA_MAP(_dm)	(_dm)->dm_map
273 #define VMWPVS_DMA_DVA(_dm)	(_dm)->dm_map->dm_segs[0].ds_addr
274 #define VMWPVS_DMA_KVA(_dm)	(void *)(_dm)->dm_kva
275 
276 struct vmwpvs_sgl {
277 	struct vmwpvs_sge	list[VMWPVS_MAXSGL];
278 } __packed;
279 
280 struct vmwpvs_ccb {
281 	SIMPLEQ_ENTRY(vmwpvs_ccb)
282 				ccb_entry;
283 
284 	bus_dmamap_t		ccb_dmamap;
285 	struct scsi_xfer	*ccb_xs;
286 	u_int64_t		ccb_ctx;
287 
288 	struct vmwpvs_sgl	*ccb_sgl;
289 	bus_addr_t		ccb_sgl_offset;
290 
291 	void			*ccb_sense;
292 	bus_addr_t		ccb_sense_offset;
293 };
294 SIMPLEQ_HEAD(vmwpvs_ccb_list, vmwpvs_ccb);
295 
296 struct vmwpvs_softc {
297 	struct device		sc_dev;
298 
299 	pci_chipset_tag_t	sc_pc;
300 	pcitag_t		sc_tag;
301 
302 	bus_space_tag_t		sc_iot;
303 	bus_space_handle_t	sc_ioh;
304 	bus_size_t		sc_ios;
305 	bus_dma_tag_t		sc_dmat;
306 
307 	struct vmwpvs_dmamem	*sc_req_ring;
308 	struct vmwpvs_dmamem	*sc_cmp_ring;
309 	struct vmwpvs_dmamem	*sc_msg_ring;
310 	struct vmwpvs_dmamem	*sc_ring_state;
311 	struct mutex		sc_ring_mtx;
312 
313 	struct vmwpvs_dmamem	*sc_sgls;
314 	struct vmwpvs_dmamem	*sc_sense;
315 	struct vmwpvs_ccb	*sc_ccbs;
316 	struct vmwpvs_ccb_list	sc_ccb_list;
317 	struct mutex		sc_ccb_mtx;
318 
319 	void			*sc_ih;
320 
321 	struct task		sc_msg_task;
322 
323 	u_int			sc_bus_width;
324 
325 	struct scsi_link	sc_link;
326 	struct scsi_iopool	sc_iopool;
327 	struct scsibus_softc	*sc_scsibus;
328 };
329 #define DEVNAME(_s)		((_s)->sc_dev.dv_xname)
330 
331 int	vmwpvs_match(struct device *, void *, void *);
332 void	vmwpvs_attach(struct device *, struct device *, void *);
333 
334 int	vmwpvs_intx(void *);
335 int	vmwpvs_intr(void *);
336 
337 #define vmwpvs_read(_s, _r) \
338 	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
339 #define vmwpvs_write(_s, _r, _v) \
340 	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
341 #define vmwpvs_barrier(_s, _r, _l, _d) \
342 	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_d))
343 
344 struct cfattach vmwpvs_ca = {
345 	sizeof(struct vmwpvs_softc),
346 	vmwpvs_match,
347 	vmwpvs_attach,
348 	NULL
349 };
350 
351 struct cfdriver vmwpvs_cd = {
352 	NULL,
353 	"vmwpvs",
354 	DV_DULL
355 };
356 
357 void		vmwpvs_scsi_cmd(struct scsi_xfer *);
358 
359 struct scsi_adapter vmwpvs_switch = {
360 	vmwpvs_scsi_cmd,
361 	scsi_minphys,
362 	NULL,
363 	NULL,
364 	NULL
365 };
366 
367 #define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
368 
369 void		vmwpvs_ccb_put(void *, void *);
370 void *		vmwpvs_ccb_get(void *);
371 
372 struct vmwpvs_dmamem *
373 		vmwpvs_dmamem_alloc(struct vmwpvs_softc *, size_t);
374 struct vmwpvs_dmamem *
375 		vmwpvs_dmamem_zalloc(struct vmwpvs_softc *, size_t);
376 void		vmwpvs_dmamem_free(struct vmwpvs_softc *,
377 		    struct vmwpvs_dmamem *);
378 
379 void		vmwpvs_cmd(struct vmwpvs_softc *, u_int32_t, void *, size_t);
380 int		vmwpvs_get_config(struct vmwpvs_softc *);
381 void		vmwpvs_setup_rings(struct vmwpvs_softc *);
382 void		vmwpvs_setup_msg_ring(struct vmwpvs_softc *);
383 void		vmwpvs_msg_task(void *, void *);
384 
385 struct vmwpvs_ccb *
386 		vmwpvs_scsi_cmd_poll(struct vmwpvs_softc *);
387 struct vmwpvs_ccb *
388 		vmwpvs_scsi_cmd_done(struct vmwpvs_softc *,
389 		    struct vmwpvs_ring_cmp *);
390 
391 int
392 vmwpvs_match(struct device *parent, void *match, void *aux)
393 {
394 	struct pci_attach_args *pa = aux;
395 
396 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
397 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI)
398 		return (1);
399 
400 	return (0);
401 }
402 
403 void
404 vmwpvs_attach(struct device *parent, struct device *self, void *aux)
405 {
406 	struct vmwpvs_softc *sc = (struct vmwpvs_softc *)self;
407 	struct pci_attach_args *pa = aux;
408 	struct scsibus_attach_args saa;
409 	pcireg_t memtype;
410 	u_int i, r, use_msg;
411 	int (*isr)(void *) = vmwpvs_intx;
412 	u_int32_t intmask;
413 	pci_intr_handle_t ih;
414 
415 	struct vmwpvs_ccb *ccb;
416 	struct vmwpvs_sgl *sgls;
417 	u_int8_t *sense;
418 
419 	sc->sc_pc = pa->pa_pc;
420 	sc->sc_tag = pa->pa_tag;
421 	sc->sc_dmat = pa->pa_dmat;
422 
423 	sc->sc_bus_width = 16;
424 	mtx_init(&sc->sc_ring_mtx, IPL_BIO);
425 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
426 	task_set(&sc->sc_msg_task, vmwpvs_msg_task, sc, NULL);
427 	SIMPLEQ_INIT(&sc->sc_ccb_list);
428 
429 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
430 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
431 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
432 			break;
433 	}
434 	if (r >= PCI_MAPREG_END) {
435 		printf(": unable to locate registers\n");
436 		return;
437 	}
438 
439 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
440 	    NULL, &sc->sc_ios, VMMPVS_PG_LEN) != 0) {
441 		printf(": unable to map registers\n");
442 		return;
443 	}
444 
445 	/* hook up the interrupt */
446 	vmwpvs_write(sc, VMWPVS_R_INTR_MASK, 0);
447 
448 	if (pci_intr_map_msi(pa, &ih) == 0)
449 		isr = vmwpvs_intr;
450 	else if (pci_intr_map(pa, &ih) != 0) {
451 		printf(": unable to map interrupt\n");
452 		goto unmap;
453 	}
454 	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
455 
456 	/* do we have msg support? */
457 	vmwpvs_write(sc, VMWPVS_R_COMMAND, VMWPVS_CMD_SETUP_MSG_RING);
458 	use_msg = (vmwpvs_read(sc, VMWPVS_R_COMMAND_STATUS) != 0xffffffff);
459 
460 	if (vmwpvs_get_config(sc) != 0) {
461 		printf("%s: get configuration failed\n", DEVNAME(sc));
462 		goto unmap;
463 	}
464 
465 	sc->sc_ring_state = vmwpvs_dmamem_zalloc(sc, VMWPVS_PAGE_SIZE);
466 	if (sc->sc_ring_state == NULL) {
467 		printf("%s: unable to allocate ring state\n", DEVNAME(sc));
468 		goto unmap;
469 	}
470 
471 	sc->sc_req_ring = vmwpvs_dmamem_zalloc(sc,
472 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
473 	if (sc->sc_req_ring == NULL) {
474 		printf("%s: unable to allocate req ring\n", DEVNAME(sc));
475 		goto free_ring_state;
476 	}
477 
478 	sc->sc_cmp_ring = vmwpvs_dmamem_zalloc(sc,
479 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
480 	if (sc->sc_cmp_ring == NULL) {
481 		printf("%s: unable to allocate cmp ring\n", DEVNAME(sc));
482 		goto free_req_ring;
483 	}
484 
485 	if (use_msg) {
486 		sc->sc_msg_ring = vmwpvs_dmamem_zalloc(sc,
487 		    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
488 		if (sc->sc_msg_ring == NULL) {
489 			printf("%s: unable to allocate msg ring\n",
490 			    DEVNAME(sc));
491 			goto free_cmp_ring;
492 		}
493 	}
494 
495 	r = (VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) /
496 	    sizeof(struct vmwpvs_ring_req);
497 
498 	sc->sc_sgls = vmwpvs_dmamem_alloc(sc, r * sizeof(struct vmwpvs_sgl));
499 	if (sc->sc_sgls == NULL) {
500 		printf("%s: unable to allocate sgls\n", DEVNAME(sc));
501 		goto free_msg_ring;
502 	}
503 
504 	sc->sc_sense = vmwpvs_dmamem_alloc(sc, r * VMWPVS_SENSELEN);
505 	if (sc->sc_sense == NULL) {
506 		printf("%s: unable to allocate sense data\n", DEVNAME(sc));
507 		goto free_sgl;
508 	}
509 
510 	sc->sc_ccbs = mallocarray(r, sizeof(struct vmwpvs_ccb),
511 	    M_DEVBUF, M_WAITOK);
512 	/* cant fail */
513 
514 	sgls = VMWPVS_DMA_KVA(sc->sc_sgls);
515 	sense = VMWPVS_DMA_KVA(sc->sc_sense);
516 	for (i = 0; i < r; i++) {
517 		ccb = &sc->sc_ccbs[i];
518 
519 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
520 		    VMWPVS_MAXSGL, MAXPHYS, 0,
521 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
522 		    &ccb->ccb_dmamap) != 0) {
523 			printf("%s: unable to create ccb map\n", DEVNAME(sc));
524 			goto free_ccbs;
525 		}
526 
527 		ccb->ccb_ctx = 0xdeadbeef00000000ULL | (u_int64_t)i;
528 
529 		ccb->ccb_sgl_offset = i * sizeof(*sgls);
530 		ccb->ccb_sgl = &sgls[i];
531 
532 		ccb->ccb_sense_offset = i * VMWPVS_SENSELEN;
533 		ccb->ccb_sense = sense + ccb->ccb_sense_offset;
534 
535 		vmwpvs_ccb_put(sc, ccb);
536 	}
537 
538 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
539 	    isr, sc, DEVNAME(sc));
540 	if (sc->sc_ih == NULL)
541 		goto free_msg_ring;
542 
543 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring), 0,
544 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
545 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
546 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREWRITE);
547 	if (use_msg) {
548 		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
549 		    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
550 	}
551 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
552 	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
553 
554 	intmask = VMWPVS_INTR_CMPL_MASK;
555 
556 	vmwpvs_setup_rings(sc);
557 	if (use_msg) {
558 		vmwpvs_setup_msg_ring(sc);
559 		intmask |= VMWPVS_INTR_MSG_MASK;
560 	}
561 
562 	vmwpvs_write(sc, VMWPVS_R_INTR_MASK, intmask);
563 
564 	/* controller init is done, lets plug the midlayer in */
565 
566 	scsi_iopool_init(&sc->sc_iopool, sc, vmwpvs_ccb_get, vmwpvs_ccb_put);
567 
568 	sc->sc_link.adapter = &vmwpvs_switch;
569 	sc->sc_link.adapter_softc = sc;
570 	sc->sc_link.adapter_target = -1;
571 	sc->sc_link.adapter_buswidth = sc->sc_bus_width;
572 	sc->sc_link.openings = VMWPVS_OPENINGS;
573 	sc->sc_link.pool = &sc->sc_iopool;
574 
575 	bzero(&saa, sizeof(saa));
576 	saa.saa_sc_link = &sc->sc_link;
577 
578 	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
579 	    &saa, scsiprint);
580 
581 	return;
582 free_ccbs:
583 	while ((ccb = vmwpvs_ccb_get(sc)) != NULL)
584 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
585 	free(sc->sc_ccbs, M_DEVBUF, 0);
586 /* free_sense: */
587 	vmwpvs_dmamem_free(sc, sc->sc_sense);
588 free_sgl:
589 	vmwpvs_dmamem_free(sc, sc->sc_sgls);
590 free_msg_ring:
591 	if (use_msg)
592 		vmwpvs_dmamem_free(sc, sc->sc_msg_ring);
593 free_cmp_ring:
594 	vmwpvs_dmamem_free(sc, sc->sc_cmp_ring);
595 free_req_ring:
596 	vmwpvs_dmamem_free(sc, sc->sc_req_ring);
597 free_ring_state:
598 	vmwpvs_dmamem_free(sc, sc->sc_ring_state);
599 unmap:
600 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
601 	sc->sc_ios = 0;
602 }
603 
604 void
605 vmwpvs_setup_rings(struct vmwpvs_softc *sc)
606 {
607 	struct vmwpvs_setup_rings_cmd cmd;
608 	u_int64_t ppn;
609 	u_int i;
610 
611 	memset(&cmd, 0, sizeof(cmd));
612 	cmd.req_pages = VMWPVS_RING_PAGES;
613 	cmd.cmp_pages = VMWPVS_RING_PAGES;
614 	cmd.state_ppn = VMWPVS_DMA_DVA(sc->sc_ring_state) >> VMWPVS_PAGE_SHIFT;
615 
616 	ppn = VMWPVS_DMA_DVA(sc->sc_req_ring) >> VMWPVS_PAGE_SHIFT;
617 	for (i = 0; i < VMWPVS_RING_PAGES; i++)
618 		cmd.req_page_ppn[i] = ppn + i;
619 
620 	ppn = VMWPVS_DMA_DVA(sc->sc_cmp_ring) >> VMWPVS_PAGE_SHIFT;
621 	for (i = 0; i < VMWPVS_RING_PAGES; i++)
622 		cmd.cmp_page_ppn[i] = ppn + i;
623 
624 	vmwpvs_cmd(sc, VMWPVS_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
625 }
626 
627 void
628 vmwpvs_setup_msg_ring(struct vmwpvs_softc *sc)
629 {
630 	struct vmwpvs_setup_rings_msg cmd;
631 	u_int64_t ppn;
632 	u_int i;
633 
634 	memset(&cmd, 0, sizeof(cmd));
635 	cmd.msg_pages = VMWPVS_RING_PAGES;
636 
637 	ppn = VMWPVS_DMA_DVA(sc->sc_msg_ring) >> VMWPVS_PAGE_SHIFT;
638 	for (i = 0; i < VMWPVS_RING_PAGES; i++)
639 		cmd.msg_page_ppn[i] = ppn + i;
640 
641 	vmwpvs_cmd(sc, VMWPVS_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
642 }
643 
/*
 * Read the controller config page from the device to learn the number
 * of phys, which becomes the scsibus width.  Returns 0 on success,
 * ENOMEM if the scratch page cannot be allocated, or EIO if the device
 * reports an error (or never writes the page back).
 */
int
vmwpvs_get_config(struct vmwpvs_softc *sc)
{
	struct vmwpvs_cfg_cmd cmd;
	struct vmwpvs_dmamem *dm;
	struct vmwpvs_cfg_pg_controller *pg;
	struct vmwpvs_cfg_pg_header *hdr;
	int rv = 0;

	/* scratch page the device writes the config page into */
	dm = vmwpvs_dmamem_alloc(sc, VMWPVS_PAGE_SIZE);
	if (dm == NULL)
		return (ENOMEM);

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmp_addr = VMWPVS_DMA_DVA(dm);
	cmd.pg_addr_type = VMWPVS_CFGPGADDR_CONTROLLER;
	cmd.pg_num = VMWPVS_CFGPG_CONTROLLER;

	/*
	 * Preload failure status codes so the check below also catches
	 * the case where the device never wrote the page at all.
	 */
	pg = VMWPVS_DMA_KVA(dm);
	memset(pg, 0, VMWPVS_PAGE_SIZE);
	hdr = &pg->header;
	hdr->host_status = VMWPVS_HOST_STATUS_INVPARAM;
	hdr->scsi_status = VMWPVS_SCSI_STATUS_CHECK;

	/* hand the page to the device, then pull its answer back in */
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(dm), 0, VMWPVS_PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);
	vmwpvs_cmd(sc, VMWPVS_CMD_CONFIG, &cmd, sizeof(cmd));
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(dm), 0, VMWPVS_PAGE_SIZE,
	    BUS_DMASYNC_POSTREAD);

	if (hdr->host_status != VMWPVS_HOST_STATUS_SUCCESS ||
	    hdr->scsi_status != VMWPVS_SCSI_STATUS_OK) {
		rv = EIO;
		goto done;
	}

	sc->sc_bus_width = pg->num_phys;

done:
	vmwpvs_dmamem_free(sc, dm);

	return (rv);

}
688 
689 void
690 vmwpvs_cmd(struct vmwpvs_softc *sc, u_int32_t cmd, void *buf, size_t len)
691 {
692 	u_int32_t *p = buf;
693 	u_int i;
694 
695 	len /= sizeof(*p);
696 
697 	vmwpvs_write(sc, VMWPVS_R_COMMAND, cmd);
698 	for (i = 0; i < len; i++)
699 		vmwpvs_write(sc, VMWPVS_R_COMMAND_DATA, p[i]);
700 }
701 
702 int
703 vmwpvs_intx(void *xsc)
704 {
705 	struct vmwpvs_softc *sc = xsc;
706 	u_int32_t status;
707 
708 	status = vmwpvs_read(sc, VMWPVS_R_INTR_STATUS);
709 	if ((status & VMWPVS_INTR_ALL_MASK) == 0)
710 		return (0);
711 
712 	vmwpvs_write(sc, VMWPVS_R_INTR_STATUS, status);
713 
714 	return (vmwpvs_intr(sc));
715 }
716 
/*
 * Common interrupt handler: claim every pending completion in one go,
 * translate the entries into ccbs under the ring mutex, then complete
 * the scsi_xfers outside it.  Message ring work is deferred to the
 * system taskq.  Always returns 1 (claimed).
 */
int
vmwpvs_intr(void *xsc)
{
	struct vmwpvs_softc *sc = xsc;
	volatile struct vmwpvw_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_cmp *ring = VMWPVS_DMA_KVA(sc->sc_cmp_ring);
	struct vmwpvs_ccb_list list = SIMPLEQ_HEAD_INITIALIZER(list);
	struct vmwpvs_ccb *ccb;
	u_int32_t cons, prod;
	int msg;

	mtx_enter(&sc->sc_ring_mtx);

	/* snapshot the indices and claim everything up to prod at once */
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = s->cmp_cons;
	prod = s->cmp_prod;
	s->cmp_cons = prod;

	/* is there msg ring work to push to the taskq? */
	msg = (sc->sc_msg_ring != NULL && s->msg_cons != s->msg_prod);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (cons != prod) {
		/*
		 * NOTE(review): this syncs only the first page of the cmp
		 * ring even though the ring is VMWPVS_RING_PAGES pages;
		 * the poll path syncs the whole ring -- confirm.
		 */
		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
		    0, VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD);

		do {
			ccb = vmwpvs_scsi_cmd_done(sc,
			    &ring[cons++ % VMWPVS_CMP_COUNT]);
			SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_entry);
		} while (cons != prod);

		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
		    0, VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
	}

	mtx_leave(&sc->sc_ring_mtx);

	/* complete the xfers without holding the ring mutex */
	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_entry);
		scsi_done(ccb->ccb_xs);
	}

	if (msg)
		task_add(systq, &sc->sc_msg_task);

	return (1);
}
768 
769 void
770 vmwpvs_msg_task(void *xsc, void *xnull)
771 {
772 	struct vmwpvs_softc *sc = xsc;
773 	volatile struct vmwpvw_ring_state *s =
774 	    VMWPVS_DMA_KVA(sc->sc_ring_state);
775 	struct vmwpvs_ring_msg *ring = VMWPVS_DMA_KVA(sc->sc_msg_ring);
776 	struct vmwpvs_ring_msg *msg;
777 	struct vmwpvs_ring_msg_dev *dvmsg;
778 	u_int32_t cons, prod;
779 
780 	mtx_enter(&sc->sc_ring_mtx);
781 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
782 	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
783 	cons = s->msg_cons;
784 	prod = s->msg_prod;
785 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
786 	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
787 	mtx_leave(&sc->sc_ring_mtx);
788 
789 	/*
790 	 * we dont have to lock around the msg ring cos the system taskq has
791 	 * only one thread.
792 	 */
793 
794 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
795 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD);
796 	while (cons != prod) {
797 		msg = &ring[cons++ % VMWPVS_MSG_COUNT];
798 
799 		switch (letoh32(msg->type)) {
800 		case VMWPVS_MSG_T_ADDED:
801 			dvmsg = (struct vmwpvs_ring_msg_dev *)msg;
802 			if (letoh32(dvmsg->bus) != 0) {
803 				printf("%s: ignoring request to add device"
804 				    " on bus %d\n", DEVNAME(sc),
805 				    letoh32(msg->type));
806 				break;
807 			}
808 
809 			if (scsi_probe_lun(sc->sc_scsibus,
810 			    letoh32(dvmsg->target), dvmsg->lun[1]) != 0) {
811 				printf("%s: error probing target %d lun %d\n",
812 				    DEVNAME(sc), letoh32(dvmsg->target),
813 				    dvmsg->lun[1]);
814 			};
815 			break;
816 
817 		case VMWPVS_MSG_T_REMOVED:
818 			dvmsg = (struct vmwpvs_ring_msg_dev *)msg;
819 			if (letoh32(dvmsg->bus) != 0) {
820 				printf("%s: ignorint request to remove device"
821 				    " on bus %d\n", DEVNAME(sc),
822 				    letoh32(msg->type));
823 				break;
824 			}
825 
826 			if (scsi_detach_lun(sc->sc_scsibus,
827 			    letoh32(dvmsg->target), dvmsg->lun[1],
828 			    DETACH_FORCE) != 0) {
829 				printf("%s: error detaching target %d lun %d\n",
830 				    DEVNAME(sc), letoh32(dvmsg->target),
831 				    dvmsg->lun[1]);
832 			};
833 			break;
834 
835 		default:
836 			printf("%s: unknown msg type %u\n", DEVNAME(sc),
837 			    letoh32(msg->type));
838 			break;
839 		}
840 	}
841 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
842 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
843 
844 	mtx_enter(&sc->sc_ring_mtx);
845 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
846 	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
847 	s->msg_cons = prod;
848 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
849 	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
850 	mtx_leave(&sc->sc_ring_mtx);
851 }
852 
853 void
854 vmwpvs_scsi_cmd(struct scsi_xfer *xs)
855 {
856 	struct scsi_link *link = xs->sc_link;
857 	struct vmwpvs_softc *sc = link->adapter_softc;
858 	struct vmwpvs_ccb *ccb = xs->io;
859 	bus_dmamap_t dmap = ccb->ccb_dmamap;
860 	volatile struct vmwpvw_ring_state *s =
861 	    VMWPVS_DMA_KVA(sc->sc_ring_state);
862 	struct vmwpvs_ring_req *ring = VMWPVS_DMA_KVA(sc->sc_req_ring), *r;
863 	u_int32_t prod;
864 	struct vmwpvs_ccb_list list;
865 	int error;
866 	u_int i;
867 
868 	ccb->ccb_xs = xs;
869 
870 	if (xs->datalen > 0) {
871 		error = bus_dmamap_load(sc->sc_dmat, dmap,
872 		    xs->data, xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
873 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
874 		if (error) {
875 			xs->error = XS_DRIVER_STUFFUP;
876 			scsi_done(xs);
877 			return;
878 		}
879 
880 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
881 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
882 		    BUS_DMASYNC_PREWRITE);
883 	}
884 
885 	mtx_enter(&sc->sc_ring_mtx);
886 
887 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
888 	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
889 
890 	prod = s->req_prod;
891 	r = &ring[prod % VMWPVS_REQ_COUNT];
892 
893 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring),
894 	    prod * sizeof(*r), sizeof(*r), BUS_DMASYNC_POSTWRITE);
895 
896 	memset(r, 0, sizeof(*r));
897 	r->context = ccb->ccb_ctx;
898 
899 	if (xs->datalen > 0) {
900 		r->data_len = xs->datalen;
901 		if (dmap->dm_nsegs == 1) {
902 			r->data_addr = dmap->dm_segs[0].ds_addr;
903 		} else {
904 			struct vmwpvs_sge *sgl = ccb->ccb_sgl->list, *sge;
905 
906 			r->data_addr = VMWPVS_DMA_DVA(sc->sc_sgls) +
907 			    ccb->ccb_sgl_offset;
908 			r->flags = VMWPVS_REQ_SGL;
909 
910 			for (i = 0; i < dmap->dm_nsegs; i++) {
911 				sge = &sgl[i];
912 				sge->addr = dmap->dm_segs[i].ds_addr;
913 				sge->len = dmap->dm_segs[i].ds_len;
914 				sge->flags = 0;
915 			}
916 
917 			bus_dmamap_sync(sc->sc_dmat,
918 			    VMWPVS_DMA_MAP(sc->sc_sgls), ccb->ccb_sgl_offset,
919 			    sizeof(*sge) * dmap->dm_nsegs,
920 			    BUS_DMASYNC_PREWRITE);
921 		}
922 	}
923 	r->sense_addr = VMWPVS_DMA_DVA(sc->sc_sense) + ccb->ccb_sense_offset;
924 	r->sense_len = sizeof(xs->sense);
925 
926 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
927 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTWRITE);
928 
929 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
930 	case SCSI_DATA_IN:
931 		r->flags |= VMWPVS_REQ_DIR_IN;
932 		break;
933 	case SCSI_DATA_OUT:
934 		r->flags |= VMWPVS_REQ_DIR_OUT;
935 		break;
936 	default:
937 		r->flags |= VMWPVS_REQ_DIR_NONE;
938 		break;
939 	}
940 
941 	memcpy(r->cdb, xs->cmd, xs->cmdlen);
942 	r->cdblen = xs->cmdlen;
943 	r->lun[1] = link->lun; /* ugly :( */
944 	r->tag = MSG_SIMPLE_Q_TAG;
945 	r->bus = 0;
946 	r->target = link->target;
947 	r->vcpu_hint = 0;
948 
949 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
950 	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREWRITE);
951 
952 	s->req_prod = prod + 1;
953 
954 	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
955 	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
956 
957 	vmwpvs_write(sc, xs->bp == NULL ?
958 	    VMWPVS_R_KICK_NON_RW_IO : VMWPVS_R_KICK_RW_IO, 0);
959 
960 	if (!ISSET(xs->flags, SCSI_POLL)) {
961 		mtx_leave(&sc->sc_ring_mtx);
962 		return;
963 	}
964 
965 	SIMPLEQ_INIT(&list);
966 	do {
967 		ccb = vmwpvs_scsi_cmd_poll(sc);
968 		SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_entry);
969 	} while (xs->io != ccb);
970 
971 	mtx_leave(&sc->sc_ring_mtx);
972 
973 	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
974 		SIMPLEQ_REMOVE_HEAD(&list, ccb_entry);
975 		scsi_done(ccb->ccb_xs);
976 	}
977 }
978 
/*
 * Busy-wait for the device to produce one completion, consume it and
 * return the corresponding ccb.  Called with sc_ring_mtx held, from
 * the SCSI_POLL path of vmwpvs_scsi_cmd().
 */
struct vmwpvs_ccb *
vmwpvs_scsi_cmd_poll(struct vmwpvs_softc *sc)
{
	volatile struct vmwpvw_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_cmp *ring = VMWPVS_DMA_KVA(sc->sc_cmp_ring);
	struct vmwpvs_ccb *ccb;
	u_int32_t prod, cons;

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat,
		    VMWPVS_DMA_MAP(sc->sc_ring_state), 0, VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cons = s->cmp_cons;
		prod = s->cmp_prod;

		/* claim exactly one entry, unlike vmwpvs_intr() */
		if (cons != prod)
			s->cmp_cons = cons + 1;

		bus_dmamap_sync(sc->sc_dmat,
		    VMWPVS_DMA_MAP(sc->sc_ring_state), 0, VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (cons != prod)
			break;
		else
			delay(1000);	/* nothing yet; wait 1ms and retry */
	}

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
	    0, VMWPVS_PAGE_SIZE * VMWPVS_RING_PAGES,
	    BUS_DMASYNC_POSTREAD);
	ccb = vmwpvs_scsi_cmd_done(sc, &ring[cons % VMWPVS_CMP_COUNT]);
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
	    0, VMWPVS_PAGE_SIZE * VMWPVS_RING_PAGES,
	    BUS_DMASYNC_PREREAD);

	return (ccb);
}
1019 
/*
 * Translate a completion ring entry into its ccb and fill in the
 * scsi_xfer status/sense/resid.  Does not call scsi_done(); callers
 * complete the xfer after dropping the ring mutex.
 */
struct vmwpvs_ccb *
vmwpvs_scsi_cmd_done(struct vmwpvs_softc *sc, struct vmwpvs_ring_cmp *c)
{
	/* the low 32 bits of the context are the ccb index (see attach) */
	u_int64_t ctx = c->context;
	struct vmwpvs_ccb *ccb = &sc->sc_ccbs[ctx & 0xffffffff];
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct scsi_xfer *xs = ccb->ccb_xs;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(xs->sense), BUS_DMASYNC_POSTREAD);

	if (xs->datalen > 0) {
		/* an sgl was only built for multi-segment transfers */
		if (dmap->dm_nsegs > 1) {
			bus_dmamap_sync(sc->sc_dmat,
			    VMWPVS_DMA_MAP(sc->sc_sgls), ccb->ccb_sgl_offset,
			    sizeof(struct vmwpvs_sge) * dmap->dm_nsegs,
			    BUS_DMASYNC_POSTWRITE);
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->status = c->scsi_status;
	switch (c->host_status) {
	case VMWPVS_HOST_STATUS_SUCCESS:
	case VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED:
	case VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED_WITH_FLAG:
		if (c->scsi_status == VMWPVS_SCSI_STATUS_CHECK) {
			/* check condition: hand the sense data up */
			memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
			xs->error = XS_SENSE;
		} else
			xs->error = XS_NOERROR;
		xs->resid = 0;
		break;

	case VMWPVS_HOST_STATUS_UNDERRUN:
	case VMWPVS_HOST_STATUS_DATARUN:
		/* partial transfer: report the residual */
		xs->resid = xs->datalen - c->data_len;
		xs->error = XS_NOERROR;
		break;

	case VMWPVS_HOST_STATUS_SELTIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		printf("%s: %s:%d h:0x%x s:0x%x\n", DEVNAME(sc),
		    __FUNCTION__, __LINE__, c->host_status, c->scsi_status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	return (ccb);
}
1078 
1079 void *
1080 vmwpvs_ccb_get(void *xsc)
1081 {
1082 	struct vmwpvs_softc *sc = xsc;
1083 	struct vmwpvs_ccb *ccb;
1084 
1085 	mtx_enter(&sc->sc_ccb_mtx);
1086 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
1087 	if (ccb != NULL)
1088 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
1089 	mtx_leave(&sc->sc_ccb_mtx);
1090 
1091 	return (ccb);
1092 }
1093 
1094 void
1095 vmwpvs_ccb_put(void *xsc, void *io)
1096 {
1097 	struct vmwpvs_softc *sc = xsc;
1098 	struct vmwpvs_ccb *ccb = io;
1099 
1100 	mtx_enter(&sc->sc_ccb_mtx);
1101 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
1102 	mtx_leave(&sc->sc_ccb_mtx);
1103 }
1104 
/*
 * Allocate a dma-mapped buffer of the given size together with its
 * bookkeeping struct.  Returns NULL on failure; partially acquired
 * resources are released in reverse order via the goto ladder.
 */
struct vmwpvs_dmamem *
vmwpvs_dmamem_alloc(struct vmwpvs_softc *sc, size_t size)
{
	struct vmwpvs_dmamem *dm;
	int nsegs;

	dm = malloc(sizeof(*dm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (dm == NULL)
		return (NULL);

	dm->dm_size = size;

	/* single segment so VMWPVS_DMA_DVA() can just use dm_segs[0] */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map) != 0)
		goto dmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &dm->dm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &dm->dm_seg, nsegs, size,
	    &dm->dm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, dm->dm_map, dm->dm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (dm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, dm->dm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &dm->dm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, dm->dm_map);
dmfree:
	free(dm, M_DEVBUF, 0);

	return (NULL);
}
1146 
1147 struct vmwpvs_dmamem *
1148 vmwpvs_dmamem_zalloc(struct vmwpvs_softc *sc, size_t size)
1149 {
1150 	struct vmwpvs_dmamem *dm;
1151 
1152 	dm = vmwpvs_dmamem_alloc(sc, size);
1153 	if (dm == NULL)
1154 		return (NULL);
1155 
1156 	memset(VMWPVS_DMA_KVA(dm), 0, size);
1157 
1158 	return (dm);
1159 }
1160 
/*
 * Release everything vmwpvs_dmamem_alloc() acquired, in the reverse
 * order it was set up: unload, unmap, free the memory, destroy the
 * map, then free the bookkeeping struct.
 */
void
vmwpvs_dmamem_free(struct vmwpvs_softc *sc, struct vmwpvs_dmamem *dm)
{
	bus_dmamap_unload(sc->sc_dmat, dm->dm_map);
	bus_dmamem_unmap(sc->sc_dmat, dm->dm_kva, dm->dm_size);
	bus_dmamem_free(sc->sc_dmat, &dm->dm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, dm->dm_map);
	free(dm, M_DEVBUF, 0);
}
1170