/*	$OpenBSD: vscsi.c,v 1.26 2011/07/17 22:46:48 matthew Exp $ */

/*
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/vscsivar.h>

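/*
 * vscsi(4) is a pseudo-device that exposes a SCSI bus whose commands
 * are serviced by a userland process.  Commands issued by the midlayer
 * are queued as ccbs on the initiator-to-target (i2t) list; the process
 * fetches them through ioctls on the device node, moves them to the
 * target-to-initiator (t2i) list while it transfers data, and finally
 * completes them back into the midlayer.
 */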
int		vscsi_match(struct device *, void *, void *);
void		vscsi_attach(struct device *, struct device *, void *);
void		vscsi_shutdown(void *);

struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;
	int			ccb_tag;
	struct scsi_xfer	*ccb_xs;
	size_t			ccb_datalen;
};

TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);

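/*
 * The device sits in CLOSED until a userland process opens it, in
 * CONFIG while open/close set up or tear down the ccb pool, and in
 * RUNNING while commands may be queued for and serviced by userland.
 */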
enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};

struct vscsi_softc {
	struct device		sc_dev;
	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;

	struct mutex		sc_state_mtx;
	enum vscsi_state	sc_state;
	u_int			sc_ref_count;
	struct pool		sc_ccb_pool;

	struct scsi_iopool	sc_iopool;

	struct vscsi_ccb_list	sc_ccb_i2t;
	struct vscsi_ccb_list	sc_ccb_t2i;
	int			sc_ccb_tag;
	struct mutex		sc_poll_mtx;
	struct rwlock		sc_ioc_lock;

	struct selinfo		sc_sel;
	struct mutex		sc_sel_mtx;
};

#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))

struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};

void		vscsi_cmd(struct scsi_xfer *);
int		vscsi_probe(struct scsi_link *);
void		vscsi_free(struct scsi_link *);

struct scsi_adapter vscsi_switch = {
	vscsi_cmd,
	scsi_minphys,
	vscsi_probe,
	vscsi_free
};

int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);

void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);

void *		vscsi_ccb_get(void *);
void		vscsi_ccb_put(void *, void *);

void		filt_vscsidetach(struct knote *);
int		filt_vscsiread(struct knote *, long);

struct filterops vscsi_filtops = {
	1,
	NULL,
	filt_vscsidetach,
	filt_vscsiread
};


int
vscsi_match(struct device *parent, void *match, void *aux)
{
	return (1);
}

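/*
 * Attach advertises a full-width bus of 256 targets; devices only show
 * up once userland asks for them via VSCSI_REQPROBE.  openings is kept
 * at 1, so each emulated device has at most one command in flight.
 */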
void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	mtx_init(&sc->sc_state_mtx, IPL_BIO);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	mtx_init(&sc->sc_sel_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);

	sc->sc_link.adapter = &vscsi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = 256;
	sc->sc_link.adapter_buswidth = 256;
	sc->sc_link.openings = 1;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}

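/*
 * Adapter entry point for the midlayer.  Commands are appended to the
 * i2t queue and any process sleeping in poll/kqueue is woken up.
 * SCSI_POLL commands then sleep here until userland completes them;
 * SCSI_POLL together with SCSI_NOSLEEP cannot be serviced and fails
 * immediately.
 */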
void
vscsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct vscsi_softc		*sc = link->adapter_softc;
	struct vscsi_ccb		*ccb = xs->io;
	int				polled = ISSET(xs->flags, SCSI_POLL);
	int				running = 0;

	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
		    xs->cmd->opcode);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING) {
		running = 1;
		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
	}
	mtx_leave(&sc->sc_state_mtx);

	if (!running) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	selwakeup(&sc->sc_sel);

	if (polled) {
		mtx_enter(&sc->sc_poll_mtx);
		while (ccb->ccb_xs != NULL)
			msleep(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll", 0);
		mtx_leave(&sc->sc_poll_mtx);
		scsi_done(xs);
	}
}

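/*
 * Completion: polled commands are handed back to the issuer sleeping
 * in vscsi_cmd(), everything else goes straight to scsi_done().
 */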
void
vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
{
	struct scsi_xfer		*xs = ccb->ccb_xs;

	if (ISSET(xs->flags, SCSI_POLL)) {
		mtx_enter(&sc->sc_poll_mtx);
		ccb->ccb_xs = NULL;
		wakeup(ccb);
		mtx_leave(&sc->sc_poll_mtx);
	} else
		scsi_done(xs);
}

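/*
 * vscsi_probe()/vscsi_free() count the devices attached to the bus so
 * vscsiclose() can wait for all of them to be detached before it tears
 * down the ccb pool.
 */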
int
vscsi_probe(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;
	int				rv = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING)
		sc->sc_ref_count++;
	else
		rv = ENXIO;
	mtx_leave(&sc->sc_state_mtx);

	return (rv);
}

void
vscsi_free(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_ref_count--;
	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
		wakeup(&sc->sc_ref_count);
	mtx_leave(&sc->sc_state_mtx);
}

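/*
 * Opening the device moves it from CLOSED to CONFIG, creates and
 * primes the ccb pool backing the iopool, and then commits to RUNNING
 * (or falls back to CLOSED if the pool cannot be primed).
 */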
int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, 0, 0,
	    "vscsiccb", NULL);
	pool_setipl(&sc->sc_ccb_pool, IPL_BIO);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}

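/*
 * ioctl interface used by the process servicing the bus.  A rough
 * sketch of a typical consumer (the exact flow is up to userland):
 *
 *	poll(2) for POLLIN			wait for a command
 *	ioctl(VSCSI_I2T)			fetch tag, CDB and direction
 *	ioctl(VSCSI_DATA_READ/_WRITE)		move data, possibly in chunks
 *	ioctl(VSCSI_T2I)			complete the tagged command
 *
 * VSCSI_REQPROBE and VSCSI_REQDETACH let the process add and remove
 * devices on the emulated bus.
 */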
int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ioc_devevent	*de = (struct vscsi_ioc_devevent *)addr;
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
		/* FALLTHROUGH */
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
		err = scsi_req_probe(sc->sc_scsibus, de->target, de->lun);
		break;

	case VSCSI_REQDETACH:
		err = scsi_req_detach(sc->sc_scsibus, de->target, de->lun,
		    DETACH_FORCE);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}

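/*
 * VSCSI_I2T: hand the oldest queued command to userland.  The ccb is
 * moved from the i2t to the t2i list, where it waits, identified by
 * its tag, for data transfers and final completion.
 */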
int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	bcopy(xs->cmd, &i2t->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}

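/*
 * VSCSI_DATA_READ/VSCSI_DATA_WRITE: copy a chunk of data between the
 * userland buffer and the xfer's data buffer.  ccb_datalen records how
 * much has been moved so far, so a large transfer may be split across
 * several calls.  The requested direction must match the xfer's
 * DATA_IN/DATA_OUT flag.
 */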
int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	if (read != xsread)
		return (EINVAL);

	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}

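/*
 * VSCSI_T2I: complete a command previously fetched with VSCSI_I2T,
 * translating the status reported by userland (done/sense/reset/error)
 * into the midlayer's xs->error values.
 */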
int
vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;
	int				rv = 0;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == t2i->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	xs->resid = xs->datalen - ccb->ccb_datalen;
	xs->status = SCSI_OK;

	switch (t2i->status) {
	case VSCSI_STAT_DONE:
		xs->error = XS_NOERROR;
		break;
	case VSCSI_STAT_SENSE:
		xs->error = XS_SENSE;
		bcopy(&t2i->sense, &xs->sense, sizeof(xs->sense));
		break;
	case VSCSI_STAT_RESET:
		xs->error = XS_RESET;
		break;
	case VSCSI_STAT_ERR:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	vscsi_done(sc, ccb);

	return (rv);
}

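/*
 * poll(2) and kqueue(2) support: the device is readable whenever a
 * command is waiting on the i2t queue for userland to pick up.
 */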
int
vscsipoll(dev_t dev, int events, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				revents = 0;

	if (sc == NULL)
		return (POLLERR);

	if (events & (POLLIN | POLLRDNORM)) {
		mtx_enter(&sc->sc_state_mtx);
		if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
			revents |= events & (POLLIN | POLLRDNORM);
		mtx_leave(&sc->sc_state_mtx);
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_sel);
	}

	device_unref(&sc->sc_dev);
	return (revents);
}

int
vscsikqfilter(dev_t dev, struct knote *kn)
{
	struct vscsi_softc *sc = DEV2SC(dev);
	struct klist *klist;

	if (sc == NULL)
		return (ENXIO);

	klist = &sc->sc_sel.si_note;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vscsi_filtops;
		break;
	default:
		device_unref(&sc->sc_dev);
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)sc;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}

void
filt_vscsidetach(struct knote *kn)
{
	struct vscsi_softc *sc = (struct vscsi_softc *)kn->kn_hook;
	struct klist *klist = &sc->sc_sel.si_note;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_REMOVE(klist, kn, knote, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);
}

int
filt_vscsiread(struct knote *kn, long hint)
{
	struct vscsi_softc *sc = (struct vscsi_softc *)kn->kn_hook;
	int event = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
		event = 1;
	mtx_leave(&sc->sc_state_mtx);

	return (event);
}

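/*
 * Closing the device aborts everything in flight: queued and
 * outstanding commands are failed with XS_RESET, the emulated devices
 * are detached, and the ccb pool is destroyed once the last reference
 * held via vscsi_probe() has been released.
 */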
int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", 0);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}

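/*
 * iopool backends: ccbs come from the pool created in vscsiopen() and
 * are tagged with a simple incrementing counter so userland can refer
 * to individual commands.
 */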
void *
vscsi_ccb_get(void *cookie)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = NULL;

	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
	if (ccb != NULL) {
		ccb->ccb_tag = sc->sc_ccb_tag++;
		ccb->ccb_datalen = 0;
	}

	return (ccb);
}

void
vscsi_ccb_put(void *cookie, void *io)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = io;

	pool_put(&sc->sc_ccb_pool, ccb);
}