1 /* $OpenBSD: vmwpvs.c,v 1.30 2024/09/20 02:00:46 jsg Exp $ */
2
3 /*
4 * Copyright (c) 2013 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/malloc.h>
23 #include <sys/task.h>
24
25 #include <machine/bus.h>
26
27 #include <dev/pci/pcireg.h>
28 #include <dev/pci/pcivar.h>
29 #include <dev/pci/pcidevs.h>
30
31 #include <scsi/scsi_all.h>
32 #include <scsi/scsi_message.h>
33 #include <scsi/scsiconf.h>
34
35 /* pushbuttons */
36 #define VMWPVS_OPENINGS 64 /* according to the linux driver */
37 #define VMWPVS_RING_PAGES 2
38 #define VMWPVS_MAXSGL (MAXPHYS / PAGE_SIZE)
39 #define VMWPVS_SENSELEN roundup(sizeof(struct scsi_sense_data), 16)
40
41 /* "chip" definitions */
42
43 #define VMWPVS_R_COMMAND 0x0000
44 #define VMWPVS_R_COMMAND_DATA 0x0004
45 #define VMWPVS_R_COMMAND_STATUS 0x0008
46 #define VMWPVS_R_LAST_STS_0 0x0100
47 #define VMWPVS_R_LAST_STS_1 0x0104
48 #define VMWPVS_R_LAST_STS_2 0x0108
49 #define VMWPVS_R_LAST_STS_3 0x010c
50 #define VMWPVS_R_INTR_STATUS 0x100c
51 #define VMWPVS_R_INTR_MASK 0x2010
52 #define VMWPVS_R_KICK_NON_RW_IO 0x3014
53 #define VMWPVS_R_DEBUG 0x3018
54 #define VMWPVS_R_KICK_RW_IO 0x4018
55
56 #define VMWPVS_INTR_CMPL_0 (1 << 0)
57 #define VMWPVS_INTR_CMPL_1 (1 << 1)
58 #define VMWPVS_INTR_CMPL_MASK (VMWPVS_INTR_CMPL_0 | VMWPVS_INTR_CMPL_1)
59 #define VMWPVS_INTR_MSG_0 (1 << 2)
60 #define VMWPVS_INTR_MSG_1 (1 << 3)
61 #define VMWPVS_INTR_MSG_MASK (VMWPVS_INTR_MSG_0 | VMWPVS_INTR_MSG_1)
62 #define VMWPVS_INTR_ALL_MASK (VMWPVS_INTR_CMPL_MASK | VMWPVS_INTR_MSG_MASK)
63
64 #define VMWPVS_PAGE_SHIFT 12
65 #define VMWPVS_PAGE_SIZE (1 << VMWPVS_PAGE_SHIFT)
66
67 #define VMWPVS_NPG_COMMAND 1
68 #define VMWPVS_NPG_INTR_STATUS 1
69 #define VMWPVS_NPG_MISC 2
70 #define VMWPVS_NPG_KICK_IO 2
71 #define VMWPVS_NPG_MSI_X 2
72
73 #define VMWPVS_PG_COMMAND 0
74 #define VMWPVS_PG_INTR_STATUS (VMWPVS_PG_COMMAND + \
75 VMWPVS_NPG_COMMAND * VMWPVS_PAGE_SIZE)
76 #define VMWPVS_PG_MISC (VMWPVS_PG_INTR_STATUS + \
77 VMWPVS_NPG_INTR_STATUS * VMWPVS_PAGE_SIZE)
78 #define VMWPVS_PG_KICK_IO (VMWPVS_PG_MISC + \
79 VMWPVS_NPG_MISC * VMWPVS_PAGE_SIZE)
80 #define VMWPVS_PG_MSI_X (VMWPVS_PG_KICK_IO + \
81 VMWPVS_NPG_KICK_IO * VMWPVS_PAGE_SIZE)
82 #define VMMPVS_PG_LEN (VMWPVS_PG_MSI_X + \
83 VMWPVS_NPG_MSI_X * VMWPVS_PAGE_SIZE)
84
/*
 * Ring state page shared with the device: free-running producer and
 * consumer indices for the request, completion and message rings.
 * The driver masks these by the ring entry count when indexing.
 */
struct vmwpvw_ring_state {
	u_int32_t req_prod;
	u_int32_t req_cons;
	u_int32_t req_entries; /* log 2 */

	u_int32_t cmp_prod;
	u_int32_t cmp_cons;
	u_int32_t cmp_entries; /* log 2 */

	u_int32_t __reserved[26];

	u_int32_t msg_prod;
	u_int32_t msg_cons;
	u_int32_t msg_entries; /* log 2 */
} __packed;
100
/*
 * One request ring descriptor.  "context" is an opaque cookie the
 * device echoes back in the matching completion (see vmwpvs_ring_cmp
 * and vmwpvs_scsi_cmd_done()).
 */
struct vmwpvs_ring_req {
	u_int64_t context;

	u_int64_t data_addr;	/* data buffer, or SGL address if REQ_SGL */
	u_int64_t data_len;

	u_int64_t sense_addr;
	u_int32_t sense_len;

	u_int32_t flags;
#define VMWPVS_REQ_SGL (1 << 0)
#define VMWPVS_REQ_OOBCDB (1 << 1)
#define VMWPVS_REQ_DIR_NONE (1 << 2)
#define VMWPVS_REQ_DIR_IN (1 << 3)
#define VMWPVS_REQ_DIR_OUT (1 << 4)

	u_int8_t cdb[16];
	u_int8_t cdblen;
	u_int8_t lun[8];
	u_int8_t tag;
	u_int8_t bus;
	u_int8_t target;
	u_int8_t vcpu_hint;

	u_int8_t __reserved[59];
} __packed;
/* number of request descriptors that fit in the request ring pages */
#define VMWPVS_REQ_COUNT ((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
    sizeof(struct vmwpvs_ring_req))
129
/*
 * One completion ring descriptor; "context" matches the request's
 * context cookie and is used to find the owning ccb.
 */
struct vmwpvs_ring_cmp {
	u_int64_t context;
	u_int64_t data_len;
	u_int32_t sense_len;
	u_int16_t host_status;	/* VMWPVS_HOST_STATUS_* */
	u_int16_t scsi_status;	/* VMWPVS_SCSI_STATUS_* */
	u_int32_t __reserved[2];
} __packed;
/* number of completion descriptors that fit in the completion ring pages */
#define VMWPVS_CMP_COUNT ((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
    sizeof(struct vmwpvs_ring_cmp))
140
/* one scatter/gather list element as consumed by the device */
struct vmwpvs_sge {
	u_int64_t addr;
	u_int32_t len;
	u_int32_t flags;
} __packed;
146
/* generic message ring entry; type selects the real layout (see below) */
struct vmwpvs_ring_msg {
	u_int32_t type;		/* VMWPVS_MSG_T_* */
	u_int32_t __args[31];
} __packed;
/* number of message descriptors that fit in the message ring pages */
#define VMWPVS_MSG_COUNT ((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
    sizeof(struct vmwpvs_ring_msg))
153
154 #define VMWPVS_MSG_T_ADDED 0
155 #define VMWPVS_MSG_T_REMOVED 1
156
/*
 * Message ring payload for VMWPVS_MSG_T_ADDED/REMOVED hotplug events.
 * NOTE(review): vmwpvs_msg_task() reads lun[1] as the LUN number —
 * presumably the device encodes the LUN in that byte; confirm against
 * the PVSCSI interface spec.
 */
struct vmwpvs_ring_msg_dev {
	u_int32_t type;		/* VMWPVS_MSG_T_* */
	u_int32_t bus;
	u_int32_t target;
	u_int8_t lun[8];

	u_int32_t __pad[27];
} __packed;
165
/* argument block for VMWPVS_CMD_CONFIG: where to DMA which config page */
struct vmwpvs_cfg_cmd {
	u_int64_t cmp_addr;	/* DMA address the page is written to */
	u_int32_t pg_addr;
	u_int32_t pg_addr_type;	/* VMWPVS_CFGPGADDR_* */
	u_int32_t pg_num;	/* VMWPVS_CFGPG_* */
	u_int32_t __reserved;
} __packed;
173
#define VMWPVS_MAX_RING_PAGES 32
/* argument block for VMWPVS_CMD_SETUP_RINGS: page frame numbers of rings */
struct vmwpvs_setup_rings_cmd {
	u_int32_t req_pages;
	u_int32_t cmp_pages;
	u_int64_t state_ppn;	/* ring state page (vmwpvw_ring_state) */
	u_int64_t req_page_ppn[VMWPVS_MAX_RING_PAGES];
	u_int64_t cmp_page_ppn[VMWPVS_MAX_RING_PAGES];
} __packed;
182
#define VMWPVS_MAX_MSG_RING_PAGES 16
/* argument block for VMWPVS_CMD_SETUP_MSG_RING */
struct vmwpvs_setup_rings_msg {
	u_int32_t msg_pages;
	u_int32_t __reserved;
	u_int64_t msg_page_ppn[VMWPVS_MAX_MSG_RING_PAGES];
} __packed;
189
190 #define VMWPVS_CMD_FIRST 0
191 #define VMWPVS_CMD_ADAPTER_RESET 1
192 #define VMWPVS_CMD_ISSUE_SCSI 2
193 #define VMWPVS_CMD_SETUP_RINGS 3
194 #define VMWPVS_CMD_RESET_BUS 4
195 #define VMWPVS_CMD_RESET_DEVICE 5
196 #define VMWPVS_CMD_ABORT_CMD 6
197 #define VMWPVS_CMD_CONFIG 7
198 #define VMWPVS_CMD_SETUP_MSG_RING 8
199 #define VMWPVS_CMD_DEVICE_UNPLUG 9
200 #define VMWPVS_CMD_LAST 10
201
202 #define VMWPVS_CFGPG_CONTROLLER 0x1958
203 #define VMWPVS_CFGPG_PHY 0x1959
204 #define VMWPVS_CFGPG_DEVICE 0x195a
205
206 #define VMWPVS_CFGPGADDR_CONTROLLER 0x2120
207 #define VMWPVS_CFGPGADDR_TARGET 0x2121
208 #define VMWPVS_CFGPGADDR_PHY 0x2122
209
/* header at the start of every config page returned by VMWPVS_CMD_CONFIG */
struct vmwpvs_cfg_pg_header {
	u_int32_t pg_num;
	u_int16_t num_dwords;
	u_int16_t host_status;	/* VMWPVS_HOST_STATUS_* */
	u_int16_t scsi_status;	/* VMWPVS_SCSI_STATUS_* */
	u_int16_t __reserved[3];
} __packed;
217
218 #define VMWPVS_HOST_STATUS_SUCCESS 0x00
219 #define VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED 0x0a
220 #define VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED_WITH_FLAG 0x0b
221 #define VMWPVS_HOST_STATUS_UNDERRUN 0x0c
222 #define VMWPVS_HOST_STATUS_SELTIMEOUT 0x11
223 #define VMWPVS_HOST_STATUS_DATARUN 0x12
224 #define VMWPVS_HOST_STATUS_BUSFREE 0x13
225 #define VMWPVS_HOST_STATUS_INVPHASE 0x14
226 #define VMWPVS_HOST_STATUS_LUNMISMATCH 0x17
227 #define VMWPVS_HOST_STATUS_INVPARAM 0x1a
228 #define VMWPVS_HOST_STATUS_SENSEFAILED 0x1b
229 #define VMWPVS_HOST_STATUS_TAGREJECT 0x1c
230 #define VMWPVS_HOST_STATUS_BADMSG 0x1d
231 #define VMWPVS_HOST_STATUS_HAHARDWARE 0x20
232 #define VMWPVS_HOST_STATUS_NORESPONSE 0x21
233 #define VMWPVS_HOST_STATUS_SENT_RST 0x22
234 #define VMWPVS_HOST_STATUS_RECV_RST 0x23
235 #define VMWPVS_HOST_STATUS_DISCONNECT 0x24
236 #define VMWPVS_HOST_STATUS_BUS_RESET 0x25
237 #define VMWPVS_HOST_STATUS_ABORT_QUEUE 0x26
238 #define VMWPVS_HOST_STATUS_HA_SOFTWARE 0x27
239 #define VMWPVS_HOST_STATUS_HA_TIMEOUT 0x30
240 #define VMWPVS_HOST_STATUS_SCSI_PARITY 0x34
241
242 #define VMWPVS_SCSI_STATUS_OK 0x00
243 #define VMWPVS_SCSI_STATUS_CHECK 0x02
244
/*
 * Controller config page; vmwpvs_get_config() uses num_phys as the
 * SCSI bus width reported to the midlayer.
 */
struct vmwpvs_cfg_pg_controller {
	struct vmwpvs_cfg_pg_header header;

	u_int64_t wwnn;
	u_int16_t manufacturer[64];
	u_int16_t serial_number[64];
	u_int16_t oprom_version[32];
	u_int16_t hardware_version[32];
	u_int16_t firmware_version[32];
	u_int32_t num_phys;
	u_int8_t use_consec_phy_wwns;
	u_int8_t __reserved[3];
} __packed;
258
259 /* driver stuff */
260
/* a single-segment DMA-able memory allocation (see vmwpvs_dmamem_alloc()) */
struct vmwpvs_dmamem {
	bus_dmamap_t dm_map;
	bus_dma_segment_t dm_seg;
	size_t dm_size;
	caddr_t dm_kva;		/* kernel mapping of the segment */
};
267 #define VMWPVS_DMA_MAP(_dm) (_dm)->dm_map
268 #define VMWPVS_DMA_DVA(_dm) (_dm)->dm_map->dm_segs[0].ds_addr
269 #define VMWPVS_DMA_KVA(_dm) (void *)(_dm)->dm_kva
270
/* per-ccb scatter/gather list, large enough for a MAXPHYS transfer */
struct vmwpvs_sgl {
	struct vmwpvs_sge list[VMWPVS_MAXSGL];
} __packed;
274
/*
 * Per-command control block.  ccb_ctx is the cookie written into the
 * request descriptor and echoed back in the completion; its low 32 bits
 * index into sc_ccbs (see vmwpvs_scsi_cmd_done()).
 */
struct vmwpvs_ccb {
	SIMPLEQ_ENTRY(vmwpvs_ccb)
		ccb_entry;

	bus_dmamap_t ccb_dmamap;
	struct scsi_xfer *ccb_xs;
	u_int64_t ccb_ctx;

	struct vmwpvs_sgl *ccb_sgl;
	bus_addr_t ccb_sgl_offset;	/* offset into sc_sgls memory */

	void *ccb_sense;
	bus_addr_t ccb_sense_offset;	/* offset into sc_sense memory */
};
SIMPLEQ_HEAD(vmwpvs_ccb_list, vmwpvs_ccb);
290
/* per-adapter driver state */
struct vmwpvs_softc {
	struct device sc_dev;

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_tag;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_size_t sc_ios;
	bus_dma_tag_t sc_dmat;

	struct vmwpvs_dmamem *sc_req_ring;	/* request descriptors */
	struct vmwpvs_dmamem *sc_cmp_ring;	/* completion descriptors */
	struct vmwpvs_dmamem *sc_msg_ring;	/* hotplug msgs, NULL if unsupported */
	struct vmwpvs_dmamem *sc_ring_state;	/* shared producer/consumer page */
	struct mutex sc_ring_mtx;		/* serializes ring state access */

	struct vmwpvs_dmamem *sc_sgls;		/* SGL storage for all ccbs */
	struct vmwpvs_dmamem *sc_sense;		/* sense storage for all ccbs */
	struct vmwpvs_ccb *sc_ccbs;
	struct vmwpvs_ccb_list sc_ccb_list;	/* free ccbs */
	struct mutex sc_ccb_mtx;		/* protects sc_ccb_list */

	void *sc_ih;

	struct task sc_msg_task;	/* runs vmwpvs_msg_task() on systq */

	u_int sc_bus_width;

	struct scsi_iopool sc_iopool;
	struct scsibus_softc *sc_scsibus;
};
323 #define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
324
325 int vmwpvs_match(struct device *, void *, void *);
326 void vmwpvs_attach(struct device *, struct device *, void *);
327
328 int vmwpvs_intx(void *);
329 int vmwpvs_intr(void *);
330
331 #define vmwpvs_read(_s, _r) \
332 bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
333 #define vmwpvs_write(_s, _r, _v) \
334 bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
335 #define vmwpvs_barrier(_s, _r, _l, _d) \
336 bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_d))
337
/* autoconf glue */
const struct cfattach vmwpvs_ca = {
	sizeof(struct vmwpvs_softc),
	vmwpvs_match,
	vmwpvs_attach,
	NULL
};

struct cfdriver vmwpvs_cd = {
	NULL,
	"vmwpvs",
	DV_DULL
};

void vmwpvs_scsi_cmd(struct scsi_xfer *);

/* midlayer entry points */
const struct scsi_adapter vmwpvs_switch = {
	vmwpvs_scsi_cmd, NULL, NULL, NULL, NULL
};
356
357 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t))
358
359 void vmwpvs_ccb_put(void *, void *);
360 void * vmwpvs_ccb_get(void *);
361
362 struct vmwpvs_dmamem *
363 vmwpvs_dmamem_alloc(struct vmwpvs_softc *, size_t);
364 struct vmwpvs_dmamem *
365 vmwpvs_dmamem_zalloc(struct vmwpvs_softc *, size_t);
366 void vmwpvs_dmamem_free(struct vmwpvs_softc *,
367 struct vmwpvs_dmamem *);
368
369 void vmwpvs_cmd(struct vmwpvs_softc *, u_int32_t, void *, size_t);
370 int vmwpvs_get_config(struct vmwpvs_softc *);
371 void vmwpvs_setup_rings(struct vmwpvs_softc *);
372 void vmwpvs_setup_msg_ring(struct vmwpvs_softc *);
373 void vmwpvs_msg_task(void *);
374
375 struct vmwpvs_ccb *
376 vmwpvs_scsi_cmd_poll(struct vmwpvs_softc *);
377 struct vmwpvs_ccb *
378 vmwpvs_scsi_cmd_done(struct vmwpvs_softc *,
379 struct vmwpvs_ring_cmp *);
380
381 int
vmwpvs_match(struct device * parent,void * match,void * aux)382 vmwpvs_match(struct device *parent, void *match, void *aux)
383 {
384 struct pci_attach_args *pa = aux;
385
386 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
387 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI)
388 return (1);
389
390 return (0);
391 }
392
393 void
vmwpvs_attach(struct device * parent,struct device * self,void * aux)394 vmwpvs_attach(struct device *parent, struct device *self, void *aux)
395 {
396 struct vmwpvs_softc *sc = (struct vmwpvs_softc *)self;
397 struct pci_attach_args *pa = aux;
398 struct scsibus_attach_args saa;
399 pcireg_t memtype;
400 u_int i, r, use_msg;
401 int (*isr)(void *) = vmwpvs_intx;
402 u_int32_t intmask;
403 pci_intr_handle_t ih;
404
405 struct vmwpvs_ccb *ccb;
406 struct vmwpvs_sgl *sgls;
407 u_int8_t *sense;
408
409 sc->sc_pc = pa->pa_pc;
410 sc->sc_tag = pa->pa_tag;
411 sc->sc_dmat = pa->pa_dmat;
412
413 sc->sc_bus_width = 16;
414 mtx_init(&sc->sc_ring_mtx, IPL_BIO);
415 mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
416 task_set(&sc->sc_msg_task, vmwpvs_msg_task, sc);
417 SIMPLEQ_INIT(&sc->sc_ccb_list);
418
419 for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
420 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
421 if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
422 break;
423 }
424 if (r >= PCI_MAPREG_END) {
425 printf(": unable to locate registers\n");
426 return;
427 }
428
429 if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
430 NULL, &sc->sc_ios, VMMPVS_PG_LEN) != 0) {
431 printf(": unable to map registers\n");
432 return;
433 }
434
435 /* hook up the interrupt */
436 vmwpvs_write(sc, VMWPVS_R_INTR_MASK, 0);
437
438 if (pci_intr_map_msi(pa, &ih) == 0)
439 isr = vmwpvs_intr;
440 else if (pci_intr_map(pa, &ih) != 0) {
441 printf(": unable to map interrupt\n");
442 goto unmap;
443 }
444 printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
445
446 /* do we have msg support? */
447 vmwpvs_write(sc, VMWPVS_R_COMMAND, VMWPVS_CMD_SETUP_MSG_RING);
448 use_msg = (vmwpvs_read(sc, VMWPVS_R_COMMAND_STATUS) != 0xffffffff);
449
450 if (vmwpvs_get_config(sc) != 0) {
451 printf("%s: get configuration failed\n", DEVNAME(sc));
452 goto unmap;
453 }
454
455 sc->sc_ring_state = vmwpvs_dmamem_zalloc(sc, VMWPVS_PAGE_SIZE);
456 if (sc->sc_ring_state == NULL) {
457 printf("%s: unable to allocate ring state\n", DEVNAME(sc));
458 goto unmap;
459 }
460
461 sc->sc_req_ring = vmwpvs_dmamem_zalloc(sc,
462 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
463 if (sc->sc_req_ring == NULL) {
464 printf("%s: unable to allocate req ring\n", DEVNAME(sc));
465 goto free_ring_state;
466 }
467
468 sc->sc_cmp_ring = vmwpvs_dmamem_zalloc(sc,
469 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
470 if (sc->sc_cmp_ring == NULL) {
471 printf("%s: unable to allocate cmp ring\n", DEVNAME(sc));
472 goto free_req_ring;
473 }
474
475 if (use_msg) {
476 sc->sc_msg_ring = vmwpvs_dmamem_zalloc(sc,
477 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
478 if (sc->sc_msg_ring == NULL) {
479 printf("%s: unable to allocate msg ring\n",
480 DEVNAME(sc));
481 goto free_cmp_ring;
482 }
483 }
484
485 r = (VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) /
486 sizeof(struct vmwpvs_ring_req);
487
488 sc->sc_sgls = vmwpvs_dmamem_alloc(sc, r * sizeof(struct vmwpvs_sgl));
489 if (sc->sc_sgls == NULL) {
490 printf("%s: unable to allocate sgls\n", DEVNAME(sc));
491 goto free_msg_ring;
492 }
493
494 sc->sc_sense = vmwpvs_dmamem_alloc(sc, r * VMWPVS_SENSELEN);
495 if (sc->sc_sense == NULL) {
496 printf("%s: unable to allocate sense data\n", DEVNAME(sc));
497 goto free_sgl;
498 }
499
500 sc->sc_ccbs = mallocarray(r, sizeof(struct vmwpvs_ccb),
501 M_DEVBUF, M_WAITOK);
502 /* can't fail */
503
504 sgls = VMWPVS_DMA_KVA(sc->sc_sgls);
505 sense = VMWPVS_DMA_KVA(sc->sc_sense);
506 for (i = 0; i < r; i++) {
507 ccb = &sc->sc_ccbs[i];
508
509 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
510 VMWPVS_MAXSGL, MAXPHYS, 0,
511 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
512 &ccb->ccb_dmamap) != 0) {
513 printf("%s: unable to create ccb map\n", DEVNAME(sc));
514 goto free_ccbs;
515 }
516
517 ccb->ccb_ctx = 0xdeadbeef00000000ULL | (u_int64_t)i;
518
519 ccb->ccb_sgl_offset = i * sizeof(*sgls);
520 ccb->ccb_sgl = &sgls[i];
521
522 ccb->ccb_sense_offset = i * VMWPVS_SENSELEN;
523 ccb->ccb_sense = sense + ccb->ccb_sense_offset;
524
525 vmwpvs_ccb_put(sc, ccb);
526 }
527
528 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
529 isr, sc, DEVNAME(sc));
530 if (sc->sc_ih == NULL)
531 goto free_msg_ring;
532
533 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring), 0,
534 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
535 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
536 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREWRITE);
537 if (use_msg) {
538 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
539 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
540 }
541 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
542 VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
543
544 intmask = VMWPVS_INTR_CMPL_MASK;
545
546 vmwpvs_setup_rings(sc);
547 if (use_msg) {
548 vmwpvs_setup_msg_ring(sc);
549 intmask |= VMWPVS_INTR_MSG_MASK;
550 }
551
552 vmwpvs_write(sc, VMWPVS_R_INTR_MASK, intmask);
553
554 scsi_iopool_init(&sc->sc_iopool, sc, vmwpvs_ccb_get, vmwpvs_ccb_put);
555
556 saa.saa_adapter = &vmwpvs_switch;
557 saa.saa_adapter_softc = sc;
558 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
559 saa.saa_adapter_buswidth = sc->sc_bus_width;
560 saa.saa_luns = 8;
561 saa.saa_openings = VMWPVS_OPENINGS;
562 saa.saa_pool = &sc->sc_iopool;
563 saa.saa_quirks = saa.saa_flags = 0;
564 saa.saa_wwpn = saa.saa_wwnn = 0;
565
566 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
567 scsiprint);
568
569 return;
570 free_ccbs:
571 while ((ccb = vmwpvs_ccb_get(sc)) != NULL)
572 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
573 free(sc->sc_ccbs, M_DEVBUF, r * sizeof(struct vmwpvs_ccb));
574 /* free_sense: */
575 vmwpvs_dmamem_free(sc, sc->sc_sense);
576 free_sgl:
577 vmwpvs_dmamem_free(sc, sc->sc_sgls);
578 free_msg_ring:
579 if (use_msg)
580 vmwpvs_dmamem_free(sc, sc->sc_msg_ring);
581 free_cmp_ring:
582 vmwpvs_dmamem_free(sc, sc->sc_cmp_ring);
583 free_req_ring:
584 vmwpvs_dmamem_free(sc, sc->sc_req_ring);
585 free_ring_state:
586 vmwpvs_dmamem_free(sc, sc->sc_ring_state);
587 unmap:
588 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
589 sc->sc_ios = 0;
590 }
591
592 void
vmwpvs_setup_rings(struct vmwpvs_softc * sc)593 vmwpvs_setup_rings(struct vmwpvs_softc *sc)
594 {
595 struct vmwpvs_setup_rings_cmd cmd;
596 u_int64_t ppn;
597 u_int i;
598
599 memset(&cmd, 0, sizeof(cmd));
600 cmd.req_pages = VMWPVS_RING_PAGES;
601 cmd.cmp_pages = VMWPVS_RING_PAGES;
602 cmd.state_ppn = VMWPVS_DMA_DVA(sc->sc_ring_state) >> VMWPVS_PAGE_SHIFT;
603
604 ppn = VMWPVS_DMA_DVA(sc->sc_req_ring) >> VMWPVS_PAGE_SHIFT;
605 for (i = 0; i < VMWPVS_RING_PAGES; i++)
606 cmd.req_page_ppn[i] = ppn + i;
607
608 ppn = VMWPVS_DMA_DVA(sc->sc_cmp_ring) >> VMWPVS_PAGE_SHIFT;
609 for (i = 0; i < VMWPVS_RING_PAGES; i++)
610 cmd.cmp_page_ppn[i] = ppn + i;
611
612 vmwpvs_cmd(sc, VMWPVS_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
613 }
614
615 void
vmwpvs_setup_msg_ring(struct vmwpvs_softc * sc)616 vmwpvs_setup_msg_ring(struct vmwpvs_softc *sc)
617 {
618 struct vmwpvs_setup_rings_msg cmd;
619 u_int64_t ppn;
620 u_int i;
621
622 memset(&cmd, 0, sizeof(cmd));
623 cmd.msg_pages = VMWPVS_RING_PAGES;
624
625 ppn = VMWPVS_DMA_DVA(sc->sc_msg_ring) >> VMWPVS_PAGE_SHIFT;
626 for (i = 0; i < VMWPVS_RING_PAGES; i++)
627 cmd.msg_page_ppn[i] = ppn + i;
628
629 vmwpvs_cmd(sc, VMWPVS_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
630 }
631
632 int
vmwpvs_get_config(struct vmwpvs_softc * sc)633 vmwpvs_get_config(struct vmwpvs_softc *sc)
634 {
635 struct vmwpvs_cfg_cmd cmd;
636 struct vmwpvs_dmamem *dm;
637 struct vmwpvs_cfg_pg_controller *pg;
638 struct vmwpvs_cfg_pg_header *hdr;
639 int rv = 0;
640
641 dm = vmwpvs_dmamem_alloc(sc, VMWPVS_PAGE_SIZE);
642 if (dm == NULL)
643 return (ENOMEM);
644
645 memset(&cmd, 0, sizeof(cmd));
646 cmd.cmp_addr = VMWPVS_DMA_DVA(dm);
647 cmd.pg_addr_type = VMWPVS_CFGPGADDR_CONTROLLER;
648 cmd.pg_num = VMWPVS_CFGPG_CONTROLLER;
649
650 pg = VMWPVS_DMA_KVA(dm);
651 memset(pg, 0, VMWPVS_PAGE_SIZE);
652 hdr = &pg->header;
653 hdr->host_status = VMWPVS_HOST_STATUS_INVPARAM;
654 hdr->scsi_status = VMWPVS_SCSI_STATUS_CHECK;
655
656 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(dm), 0, VMWPVS_PAGE_SIZE,
657 BUS_DMASYNC_PREREAD);
658 vmwpvs_cmd(sc, VMWPVS_CMD_CONFIG, &cmd, sizeof(cmd));
659 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(dm), 0, VMWPVS_PAGE_SIZE,
660 BUS_DMASYNC_POSTREAD);
661
662 if (hdr->host_status != VMWPVS_HOST_STATUS_SUCCESS ||
663 hdr->scsi_status != VMWPVS_SCSI_STATUS_OK) {
664 rv = EIO;
665 goto done;
666 }
667
668 sc->sc_bus_width = pg->num_phys;
669
670 done:
671 vmwpvs_dmamem_free(sc, dm);
672
673 return (rv);
674
675 }
676
677 void
vmwpvs_cmd(struct vmwpvs_softc * sc,u_int32_t cmd,void * buf,size_t len)678 vmwpvs_cmd(struct vmwpvs_softc *sc, u_int32_t cmd, void *buf, size_t len)
679 {
680 u_int32_t *p = buf;
681 u_int i;
682
683 len /= sizeof(*p);
684
685 vmwpvs_write(sc, VMWPVS_R_COMMAND, cmd);
686 for (i = 0; i < len; i++)
687 vmwpvs_write(sc, VMWPVS_R_COMMAND_DATA, p[i]);
688 }
689
690 int
vmwpvs_intx(void * xsc)691 vmwpvs_intx(void *xsc)
692 {
693 struct vmwpvs_softc *sc = xsc;
694 u_int32_t status;
695
696 status = vmwpvs_read(sc, VMWPVS_R_INTR_STATUS);
697 if ((status & VMWPVS_INTR_ALL_MASK) == 0)
698 return (0);
699
700 vmwpvs_write(sc, VMWPVS_R_INTR_STATUS, status);
701
702 return (vmwpvs_intr(sc));
703 }
704
/*
 * Main interrupt handler (called directly for MSI, via vmwpvs_intx for
 * INTx).  Drains the completion ring under sc_ring_mtx, completes the
 * xfers outside the mutex, and defers message ring processing to the
 * system taskq.
 */
int
vmwpvs_intr(void *xsc)
{
	struct vmwpvs_softc *sc = xsc;
	volatile struct vmwpvw_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_cmp *ring = VMWPVS_DMA_KVA(sc->sc_cmp_ring);
	struct vmwpvs_ccb_list list = SIMPLEQ_HEAD_INITIALIZER(list);
	struct vmwpvs_ccb *ccb;
	u_int32_t cons, prod;
	int msg;

	mtx_enter(&sc->sc_ring_mtx);

	/* snapshot the indices and claim the whole batch of completions */
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = s->cmp_cons;
	prod = s->cmp_prod;
	s->cmp_cons = prod;

	/* any message ring work pending? handled later on the taskq */
	msg = (sc->sc_msg_ring != NULL && s->msg_cons != s->msg_prod);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (cons != prod) {
		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
		    0, VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD);

		/* indices are free-running; mask by the ring size */
		do {
			ccb = vmwpvs_scsi_cmd_done(sc,
			    &ring[cons++ % VMWPVS_CMP_COUNT]);
			SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_entry);
		} while (cons != prod);

		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
		    0, VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
	}

	mtx_leave(&sc->sc_ring_mtx);

	/* complete xfers without holding the ring mutex */
	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_entry);
		scsi_done(ccb->ccb_xs);
	}

	if (msg)
		task_add(systq, &sc->sc_msg_task);

	return (1);
}
756
757 void
vmwpvs_msg_task(void * xsc)758 vmwpvs_msg_task(void *xsc)
759 {
760 struct vmwpvs_softc *sc = xsc;
761 volatile struct vmwpvw_ring_state *s =
762 VMWPVS_DMA_KVA(sc->sc_ring_state);
763 struct vmwpvs_ring_msg *ring = VMWPVS_DMA_KVA(sc->sc_msg_ring);
764 struct vmwpvs_ring_msg *msg;
765 struct vmwpvs_ring_msg_dev *dvmsg;
766 u_int32_t cons, prod;
767
768 mtx_enter(&sc->sc_ring_mtx);
769 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
770 VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
771 cons = s->msg_cons;
772 prod = s->msg_prod;
773 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
774 VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
775 mtx_leave(&sc->sc_ring_mtx);
776
777 /*
778 * we dont have to lock around the msg ring cos the system taskq has
779 * only one thread.
780 */
781
782 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
783 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD);
784 while (cons != prod) {
785 msg = &ring[cons++ % VMWPVS_MSG_COUNT];
786
787 switch (letoh32(msg->type)) {
788 case VMWPVS_MSG_T_ADDED:
789 dvmsg = (struct vmwpvs_ring_msg_dev *)msg;
790 if (letoh32(dvmsg->bus) != 0) {
791 printf("%s: ignoring request to add device"
792 " on bus %d\n", DEVNAME(sc),
793 letoh32(msg->type));
794 break;
795 }
796
797 if (scsi_probe_lun(sc->sc_scsibus,
798 letoh32(dvmsg->target), dvmsg->lun[1]) != 0) {
799 printf("%s: error probing target %d lun %d\n",
800 DEVNAME(sc), letoh32(dvmsg->target),
801 dvmsg->lun[1]);
802 }
803 break;
804
805 case VMWPVS_MSG_T_REMOVED:
806 dvmsg = (struct vmwpvs_ring_msg_dev *)msg;
807 if (letoh32(dvmsg->bus) != 0) {
808 printf("%s: ignoring request to remove device"
809 " on bus %d\n", DEVNAME(sc),
810 letoh32(msg->type));
811 break;
812 }
813
814 if (scsi_detach_lun(sc->sc_scsibus,
815 letoh32(dvmsg->target), dvmsg->lun[1],
816 DETACH_FORCE) != 0) {
817 printf("%s: error detaching target %d lun %d\n",
818 DEVNAME(sc), letoh32(dvmsg->target),
819 dvmsg->lun[1]);
820 }
821 break;
822
823 default:
824 printf("%s: unknown msg type %u\n", DEVNAME(sc),
825 letoh32(msg->type));
826 break;
827 }
828 }
829 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
830 VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
831
832 mtx_enter(&sc->sc_ring_mtx);
833 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
834 VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
835 s->msg_cons = prod;
836 bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
837 VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
838 mtx_leave(&sc->sc_ring_mtx);
839 }
840
/*
 * Midlayer entry point: queue a SCSI command on the request ring and
 * kick the device.  The ccb comes from the iopool via xs->io.  For
 * SCSI_POLL commands, spin on the completion ring until this xfer
 * finishes.
 */
void
vmwpvs_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct vmwpvs_softc *sc = link->bus->sb_adapter_softc;
	struct vmwpvs_ccb *ccb = xs->io;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	volatile struct vmwpvw_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_req *ring = VMWPVS_DMA_KVA(sc->sc_req_ring), *r;
	u_int32_t prod;
	struct vmwpvs_ccb_list list;
	int error;
	u_int i;

	ccb->ccb_xs = xs;

	if (xs->datalen > 0) {
		/* map the data buffer for DMA */
		error = bus_dmamap_load(sc->sc_dmat, dmap,
		    xs->data, xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_ring_mtx);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* producer index is free-running; mask it for the slot */
	prod = s->req_prod;
	r = &ring[prod % VMWPVS_REQ_COUNT];

	/*
	 * NOTE(review): this sync offset uses the raw prod, not
	 * prod % VMWPVS_REQ_COUNT — once prod wraps past the ring size
	 * the offset exceeds the map; confirm against bus_dmamap_sync(9).
	 */
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring),
	    prod * sizeof(*r), sizeof(*r), BUS_DMASYNC_POSTWRITE);

	memset(r, 0, sizeof(*r));
	r->context = ccb->ccb_ctx;

	if (xs->datalen > 0) {
		r->data_len = xs->datalen;
		if (dmap->dm_nsegs == 1) {
			/* single segment: point straight at the buffer */
			r->data_addr = dmap->dm_segs[0].ds_addr;
		} else {
			/* multiple segments: build this ccb's SGL */
			struct vmwpvs_sge *sgl = ccb->ccb_sgl->list, *sge;

			r->data_addr = VMWPVS_DMA_DVA(sc->sc_sgls) +
			    ccb->ccb_sgl_offset;
			r->flags = VMWPVS_REQ_SGL;

			for (i = 0; i < dmap->dm_nsegs; i++) {
				sge = &sgl[i];
				sge->addr = dmap->dm_segs[i].ds_addr;
				sge->len = dmap->dm_segs[i].ds_len;
				sge->flags = 0;
			}

			bus_dmamap_sync(sc->sc_dmat,
			    VMWPVS_DMA_MAP(sc->sc_sgls), ccb->ccb_sgl_offset,
			    sizeof(*sge) * dmap->dm_nsegs,
			    BUS_DMASYNC_PREWRITE);
		}
	}
	r->sense_addr = VMWPVS_DMA_DVA(sc->sc_sense) + ccb->ccb_sense_offset;
	r->sense_len = sizeof(xs->sense);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTWRITE);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		r->flags |= VMWPVS_REQ_DIR_IN;
		break;
	case SCSI_DATA_OUT:
		r->flags |= VMWPVS_REQ_DIR_OUT;
		break;
	default:
		r->flags |= VMWPVS_REQ_DIR_NONE;
		break;
	}

	memcpy(r->cdb, &xs->cmd, xs->cmdlen);
	r->cdblen = xs->cmdlen;
	r->lun[1] = link->lun; /* ugly :( */
	r->tag = MSG_SIMPLE_Q_TAG;
	r->bus = 0;
	r->target = link->target;
	r->vcpu_hint = 0;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREWRITE);

	/* publish the new producer index, then kick the device */
	s->req_prod = prod + 1;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	vmwpvs_write(sc, xs->bp == NULL ?
	    VMWPVS_R_KICK_NON_RW_IO : VMWPVS_R_KICK_RW_IO, 0);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_ring_mtx);
		return;
	}

	/* polled: reap completions until this xfer's ccb comes back */
	SIMPLEQ_INIT(&list);
	do {
		ccb = vmwpvs_scsi_cmd_poll(sc);
		SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_entry);
	} while (xs->io != ccb);

	mtx_leave(&sc->sc_ring_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_entry);
		scsi_done(ccb->ccb_xs);
	}
}
966
/*
 * Busy-wait for the next completion to appear on the completion ring
 * and return its ccb.  Called with sc_ring_mtx held (from the
 * SCSI_POLL path of vmwpvs_scsi_cmd()).
 */
struct vmwpvs_ccb *
vmwpvs_scsi_cmd_poll(struct vmwpvs_softc *sc)
{
	volatile struct vmwpvw_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_cmp *ring = VMWPVS_DMA_KVA(sc->sc_cmp_ring);
	struct vmwpvs_ccb *ccb;
	u_int32_t prod, cons;

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat,
		    VMWPVS_DMA_MAP(sc->sc_ring_state), 0, VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cons = s->cmp_cons;
		prod = s->cmp_prod;

		/* claim exactly one completion if one is available */
		if (cons != prod)
			s->cmp_cons = cons + 1;

		bus_dmamap_sync(sc->sc_dmat,
		    VMWPVS_DMA_MAP(sc->sc_ring_state), 0, VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (cons != prod)
			break;
		else
			delay(1000);
	}

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
	    0, VMWPVS_PAGE_SIZE * VMWPVS_RING_PAGES,
	    BUS_DMASYNC_POSTREAD);
	ccb = vmwpvs_scsi_cmd_done(sc, &ring[cons % VMWPVS_CMP_COUNT]);
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
	    0, VMWPVS_PAGE_SIZE * VMWPVS_RING_PAGES,
	    BUS_DMASYNC_PREREAD);

	return (ccb);
}
1007
/*
 * Translate a completion descriptor into xs status/error, unload the
 * data DMA map, and return the owning ccb (looked up via the low 32
 * bits of the echoed context cookie).  Caller invokes scsi_done().
 */
struct vmwpvs_ccb *
vmwpvs_scsi_cmd_done(struct vmwpvs_softc *sc, struct vmwpvs_ring_cmp *c)
{
	u_int64_t ctx = c->context;
	struct vmwpvs_ccb *ccb = &sc->sc_ccbs[ctx & 0xffffffff];
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct scsi_xfer *xs = ccb->ccb_xs;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(xs->sense), BUS_DMASYNC_POSTREAD);

	if (xs->datalen > 0) {
		/* SGL was only written when there was more than one segment */
		if (dmap->dm_nsegs > 1) {
			bus_dmamap_sync(sc->sc_dmat,
			    VMWPVS_DMA_MAP(sc->sc_sgls), ccb->ccb_sgl_offset,
			    sizeof(struct vmwpvs_sge) * dmap->dm_nsegs,
			    BUS_DMASYNC_POSTWRITE);
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->status = c->scsi_status;
	switch (c->host_status) {
	case VMWPVS_HOST_STATUS_SUCCESS:
	case VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED:
	case VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED_WITH_FLAG:
		/* transport ok; surface sense data on CHECK CONDITION */
		if (c->scsi_status == VMWPVS_SCSI_STATUS_CHECK) {
			memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
			xs->error = XS_SENSE;
		} else
			xs->error = XS_NOERROR;
		xs->resid = 0;
		break;

	case VMWPVS_HOST_STATUS_UNDERRUN:
	case VMWPVS_HOST_STATUS_DATARUN:
		xs->resid = xs->datalen - c->data_len;
		xs->error = XS_NOERROR;
		break;

	case VMWPVS_HOST_STATUS_SELTIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		printf("%s: %s:%d h:0x%x s:0x%x\n", DEVNAME(sc),
		    __FUNCTION__, __LINE__, c->host_status, c->scsi_status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	return (ccb);
}
1066
1067 void *
vmwpvs_ccb_get(void * xsc)1068 vmwpvs_ccb_get(void *xsc)
1069 {
1070 struct vmwpvs_softc *sc = xsc;
1071 struct vmwpvs_ccb *ccb;
1072
1073 mtx_enter(&sc->sc_ccb_mtx);
1074 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
1075 if (ccb != NULL)
1076 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
1077 mtx_leave(&sc->sc_ccb_mtx);
1078
1079 return (ccb);
1080 }
1081
1082 void
vmwpvs_ccb_put(void * xsc,void * io)1083 vmwpvs_ccb_put(void *xsc, void *io)
1084 {
1085 struct vmwpvs_softc *sc = xsc;
1086 struct vmwpvs_ccb *ccb = io;
1087
1088 mtx_enter(&sc->sc_ccb_mtx);
1089 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
1090 mtx_leave(&sc->sc_ccb_mtx);
1091 }
1092
/*
 * Allocate a contiguous, kernel-mapped, DMA-loaded memory chunk.
 * Returns NULL on failure; each step in the goto chain unwinds the
 * steps that succeeded before it.
 */
struct vmwpvs_dmamem *
vmwpvs_dmamem_alloc(struct vmwpvs_softc *sc, size_t size)
{
	struct vmwpvs_dmamem *dm;
	int nsegs;

	dm = malloc(sizeof(*dm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (dm == NULL)
		return (NULL);

	dm->dm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map) != 0)
		goto dmfree;

	/* single segment so VMWPVS_DMA_DVA() can use dm_segs[0] */
	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &dm->dm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &dm->dm_seg, nsegs, size,
	    &dm->dm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, dm->dm_map, dm->dm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (dm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, dm->dm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &dm->dm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, dm->dm_map);
dmfree:
	free(dm, M_DEVBUF, sizeof *dm);

	return (NULL);
}
1134
1135 struct vmwpvs_dmamem *
vmwpvs_dmamem_zalloc(struct vmwpvs_softc * sc,size_t size)1136 vmwpvs_dmamem_zalloc(struct vmwpvs_softc *sc, size_t size)
1137 {
1138 struct vmwpvs_dmamem *dm;
1139
1140 dm = vmwpvs_dmamem_alloc(sc, size);
1141 if (dm == NULL)
1142 return (NULL);
1143
1144 memset(VMWPVS_DMA_KVA(dm), 0, size);
1145
1146 return (dm);
1147 }
1148
/* tear down a vmwpvs_dmamem in reverse order of its construction */
void
vmwpvs_dmamem_free(struct vmwpvs_softc *sc, struct vmwpvs_dmamem *dm)
{
	bus_dmamap_unload(sc->sc_dmat, dm->dm_map);
	bus_dmamem_unmap(sc->sc_dmat, dm->dm_kva, dm->dm_size);
	bus_dmamem_free(sc->sc_dmat, &dm->dm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, dm->dm_map);
	free(dm, M_DEVBUF, sizeof *dm);
}
1158