/* $OpenBSD: mpath.c,v 1.25 2011/07/17 22:46:48 matthew Exp $ */

/*
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * mpath: a virtual SCSI bus that multiplexes multiple physical paths
 * (scsi_links provided by path drivers via mpath_path_attach()) to the
 * same underlying device onto a single target on the mpath bus.  Paths
 * belonging to one device are matched by device id (devid) and by the
 * path driver's mpath_ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/mpathvar.h>

/* one possible device per target on the virtual bus */
#define MPATH_BUSWIDTH 256

int		mpath_match(struct device *, void *, void *);
void		mpath_attach(struct device *, struct device *, void *);
/* NOTE(review): declared but no definition is visible in this file — confirm. */
void		mpath_shutdown(void *);

TAILQ_HEAD(mpath_paths, mpath_path);

/*
 * A ccb associates a pending scsi_xfer from the mpath bus with an entry
 * on the owning device's queue until a path can issue it.
 */
struct mpath_ccb {
	struct scsi_xfer	*c_xs;
	SIMPLEQ_ENTRY(mpath_ccb) c_entry;
};
SIMPLEQ_HEAD(mpath_ccbs, mpath_ccb);

/*
 * Per-device state: one of these exists for each distinct devid seen by
 * mpath_path_attach().  d_mtx protects the ccb queue, the path list, and
 * the round-robin cursor.
 */
struct mpath_dev {
	struct mutex		 d_mtx;

	struct mpath_ccbs	 d_ccbs;	/* commands waiting for a path */
	struct mpath_paths	 d_paths;	/* all attached paths */
	struct mpath_path	*d_next_path;	/* round-robin cursor */

	u_int			 d_path_count;

	const struct mpath_ops	*d_ops;		/* path driver's ops */
	struct devid		*d_id;		/* identity shared by all paths */
};

struct mpath_softc {
	struct device		sc_dev;
	struct scsi_link	sc_link;
	struct pool		sc_ccb_pool;
	struct scsi_iopool	sc_iopool;
	struct scsibus_softc	*sc_scsibus;
};
#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)

/* there is at most one mpath bus in the system */
struct mpath_softc	*mpath;
struct mpath_dev	*mpath_devs[MPATH_BUSWIDTH];

struct cfattach mpath_ca = {
	sizeof(struct mpath_softc),
	mpath_match,
	mpath_attach
};

struct cfdriver mpath_cd = {
	NULL,
	"mpath",
	DV_DULL
};

void		mpath_cmd(struct scsi_xfer *);
void		mpath_minphys(struct buf *, struct scsi_link *);
int		mpath_probe(struct scsi_link *);

struct mpath_path *mpath_next_path(struct mpath_dev *, int);
void		mpath_done(struct scsi_xfer *);

struct scsi_adapter mpath_switch = {
	mpath_cmd,
	scsi_minphys,
	mpath_probe
};

void		mpath_xs_stuffup(struct scsi_xfer *);

void *		mpath_ccb_get(void *);
void		mpath_ccb_put(void *, void *);

int
mpath_match(struct device *parent, void *match, void *aux)
{
	return (1);
}

/*
 * Attach the virtual mpath bus: set up the ccb pool/iopool and attach a
 * scsibus on top of the adapter switch above.
 */
void
mpath_attach(struct device *parent, struct device *self, void *aux)
{
	struct mpath_softc		*sc = (struct mpath_softc *)self;
	struct scsibus_attach_args	saa;

	mpath = sc;

	printf("\n");

	pool_init(&sc->sc_ccb_pool, sizeof(struct mpath_ccb), 0, 0, 0,
	    "mpathccb", NULL);
	pool_setipl(&sc->sc_ccb_pool, IPL_BIO);

	scsi_iopool_init(&sc->sc_iopool, sc, mpath_ccb_get, mpath_ccb_put);

	sc->sc_link.adapter = &mpath_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = MPATH_BUSWIDTH;
	sc->sc_link.adapter_buswidth = MPATH_BUSWIDTH;
	sc->sc_link.luns = 1;
	sc->sc_link.openings = 1024; /* XXX magical */
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}

/* Fail an xfer back to the midlayer when no path can service it. */
void
mpath_xs_stuffup(struct scsi_xfer *xs)
{
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}

/*
 * Probe a target on the mpath bus: succeeds only for lun 0 of targets
 * that have an mpath_dev, and hands the device's id to the new link.
 */
int
mpath_probe(struct scsi_link *link)
{
	struct mpath_dev *d = mpath_devs[link->target];

	if (link->lun != 0 || d == NULL)
		return (ENXIO);

	link->id = devid_copy(d->d_id);

	return (0);
}

/*
 * Return the current path for a device, advancing the round-robin cursor
 * (wrapping to the head of the list) when next == MPATH_NEXT.  Callers
 * hold d_mtx across this.
 */
struct mpath_path *
mpath_next_path(struct mpath_dev *d, int next)
{
	struct mpath_path *p;

	if (d == NULL)
		panic("%s: d is NULL", __func__);

	p = d->d_next_path;
	if (p != NULL && next == MPATH_NEXT) {
		d->d_next_path = TAILQ_NEXT(p, p_entry);
		if (d->d_next_path == NULL)
			d->d_next_path = TAILQ_FIRST(&d->d_paths);
	}

	return (p);
}

/*
 * Issue a command from the mpath bus.  Polled commands are copied onto a
 * fresh xfer on the chosen path and run synchronously; everything else is
 * queued as a ccb on the device and handed to a path's xsh for async
 * dispatch via mpath_start().
 */
void
mpath_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpath_dev *d = mpath_devs[link->target];
	struct mpath_ccb *ccb = xs->io;
	struct mpath_path *p;
	struct scsi_xfer *mxs;

#ifdef DIAGNOSTIC
	if (d == NULL)
		panic("mpath_cmd issued against nonexistant device");
#endif

	if (ISSET(xs->flags, SCSI_POLL)) {
		mtx_enter(&d->d_mtx);
		p = mpath_next_path(d, d->d_ops->op_schedule);
		mtx_leave(&d->d_mtx);
		if (p == NULL) {
			mpath_xs_stuffup(xs);
			return;
		}

		mxs = scsi_xs_get(p->p_link, xs->flags);
		if (mxs == NULL) {
			mpath_xs_stuffup(xs);
			return;
		}

		memcpy(mxs->cmd, xs->cmd, xs->cmdlen);
		mxs->cmdlen = xs->cmdlen;
		mxs->data = xs->data;
		mxs->datalen = xs->datalen;
		mxs->retries = xs->retries;
		mxs->timeout = xs->timeout;
		mxs->bp = xs->bp;

		scsi_xs_sync(mxs);

		xs->error = mxs->error;
		xs->status = mxs->status;
		xs->resid = mxs->resid;

		memcpy(&xs->sense, &mxs->sense, sizeof(xs->sense));

		scsi_xs_put(mxs);
		scsi_done(xs);
		return;
	}

	ccb->c_xs = xs;

	mtx_enter(&d->d_mtx);
	SIMPLEQ_INSERT_TAIL(&d->d_ccbs, ccb, c_entry);
	p = mpath_next_path(d, d->d_ops->op_schedule);
	mtx_leave(&d->d_mtx);

	if (p != NULL)
		scsi_xsh_add(&p->p_xsh);
}

/*
 * Dequeue the next pending ccb for a path's device and run it on the
 * supplied path xfer.  Called with an xfer already allocated on the path
 * (presumably from the path's scsi_xshandler — confirm against callers).
 * If more ccbs remain, the path's xsh is re-armed after dispatch.
 */
void
mpath_start(struct mpath_path *p, struct scsi_xfer *mxs)
{
	struct mpath_dev *d = p->p_dev;
	struct mpath_ccb *ccb;
	struct scsi_xfer *xs;
	int addxsh = 0;

	if (ISSET(p->p_link->state, SDEV_S_DYING) || d == NULL)
		goto fail;

	mtx_enter(&d->d_mtx);
	ccb = SIMPLEQ_FIRST(&d->d_ccbs);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&d->d_ccbs, c_entry);
		if (!SIMPLEQ_EMPTY(&d->d_ccbs))
			addxsh = 1;
	}
	mtx_leave(&d->d_mtx);

	if (ccb == NULL)
		goto fail;

	xs = ccb->c_xs;

	memcpy(mxs->cmd, xs->cmd, xs->cmdlen);
	mxs->cmdlen = xs->cmdlen;
	mxs->data = xs->data;
	mxs->datalen = xs->datalen;
	mxs->retries = xs->retries;
	mxs->timeout = xs->timeout;
	mxs->bp = xs->bp;
	mxs->flags = xs->flags;

	/* route completion back through mpath_done() with the original xs */
	mxs->cookie = xs;
	mxs->done = mpath_done;

	scsi_xs_exec(mxs);

	if (addxsh)
		scsi_xsh_add(&p->p_xsh);

	return;
fail:
	scsi_xs_put(mxs);
}

/*
 * Completion of a path xfer.  On path failure (selection timeout or
 * reset) the ccb is requeued at the head of the device queue and retried
 * on a path; otherwise the result is copied back to the original xs and
 * completed.
 */
void
mpath_done(struct scsi_xfer *mxs)
{
	struct scsi_xfer *xs = mxs->cookie;
	struct scsi_link *link = xs->sc_link;
	struct mpath_ccb *ccb = xs->io;
	struct mpath_dev *d = mpath_devs[link->target];
	struct mpath_path *p;
	int next = d->d_ops->op_schedule;

	switch (mxs->error) {
	case XS_SELTIMEOUT: /* physical path is gone, try the next */
		next = MPATH_NEXT;
		/* FALLTHROUGH */
	case XS_RESET:
		mtx_enter(&d->d_mtx);
		SIMPLEQ_INSERT_HEAD(&d->d_ccbs, ccb, c_entry);
		p = mpath_next_path(d, next);
		mtx_leave(&d->d_mtx);

		scsi_xs_put(mxs);

		if (p != NULL)
			scsi_xsh_add(&p->p_xsh);

		return;
	}

	xs->error = mxs->error;
	xs->status = mxs->status;
	xs->resid = mxs->resid;

	memcpy(&xs->sense, &mxs->sense, sizeof(xs->sense));

	scsi_xs_put(mxs);

	scsi_done(xs);
}

/*
 * Apply every path's minphys to the buf.
 * NOTE(review): mpath_switch above uses scsi_minphys, so this function
 * appears unreferenced in this file — confirm whether it should be wired
 * into the adapter switch.  Also walks d_paths without taking d_mtx —
 * confirm that is safe for its callers.
 */
void
mpath_minphys(struct buf *bp, struct scsi_link *link)
{
	struct mpath_dev *d = mpath_devs[link->target];
	struct mpath_path *p;

#ifdef DIAGNOSTIC
	if (d == NULL)
		panic("mpath_minphys against nonexistant device");
#endif

	TAILQ_FOREACH(p, &d->d_paths, p_entry)
		p->p_link->adapter->scsi_minphys(bp, p->p_link);
}

/*
 * Helper for path drivers: decide whether a scsi_link is a candidate
 * mpath path.  Rejects links without a device id, links that already
 * belong to the mpath bus itself (so mpath does not stack on itself),
 * and everything when the mpath device is not enabled in the kernel
 * config (cfdata lookup is cached in cf after the first call).
 */
int
mpath_path_probe(struct scsi_link *link)
{
	static struct cfdata *cf = NULL;

	if (cf == NULL) {
		for (cf = cfdata; cf->cf_attach != (struct cfattach *)-1;
		    cf++) {
			if (cf->cf_attach == NULL)
				continue;
			if (cf->cf_driver == &mpath_cd)
				break;
		}
	}

	if (cf->cf_fstate == FSTATE_DNOTFOUND || cf->cf_fstate == FSTATE_DSTAR)
		return (ENXIO);

	if (link->id == NULL)
		return (EINVAL);

	if (mpath != NULL && mpath == link->adapter_softc)
		return (ENXIO);

	return (0);
}

/*
 * Attach a path to the device matching its devid and ops, creating the
 * device in a free target slot if this is the first path to it.  A new
 * device triggers a probe of its target on the mpath bus; a new path on
 * an existing device re-arms dispatch if commands are already queued.
 */
int
mpath_path_attach(struct mpath_path *p, const struct mpath_ops *ops)
{
	struct scsi_link *link = p->p_link;
	struct mpath_dev *d = NULL;
	int newdev = 0, addxsh = 0;
	int target;

#ifdef DIAGNOSTIC
	if (p->p_link == NULL)
		panic("mpath_path_attach: NULL link");
	if (p->p_dev != NULL)
		panic("mpath_path_attach: dev is not NULL");
#endif

	for (target = 0; target < MPATH_BUSWIDTH; target++) {
		if ((d = mpath_devs[target]) == NULL)
			continue;

		if (DEVID_CMP(d->d_id, link->id) && d->d_ops == ops)
			break;

		d = NULL;
	}

	if (d == NULL) {
		/* no existing device: claim the first free target slot */
		for (target = 0; target < MPATH_BUSWIDTH; target++) {
			if (mpath_devs[target] == NULL)
				break;
		}
		if (target >= MPATH_BUSWIDTH)
			return (ENXIO);

		d = malloc(sizeof(*d), M_DEVBUF,
		    M_WAITOK | M_CANFAIL | M_ZERO);
		if (d == NULL)
			return (ENOMEM);

		mtx_init(&d->d_mtx, IPL_BIO);
		TAILQ_INIT(&d->d_paths);
		SIMPLEQ_INIT(&d->d_ccbs);
		d->d_id = devid_copy(link->id);
		d->d_ops = ops;

		mpath_devs[target] = d;
		newdev = 1;
	} else {
		/*
		 * instead of carrying identical values in different devid
		 * instances, delete the new one and reference the old one in
		 * the new scsi_link.
		 */
		devid_free(link->id);
		link->id = devid_copy(d->d_id);
	}

	p->p_dev = d;
	mtx_enter(&d->d_mtx);
	if (TAILQ_EMPTY(&d->d_paths))
		d->d_next_path = p;
	TAILQ_INSERT_TAIL(&d->d_paths, p, p_entry);
	d->d_path_count++;
	if (!SIMPLEQ_EMPTY(&d->d_ccbs))
		addxsh = 1;
	mtx_leave(&d->d_mtx);

	if (newdev && mpath != NULL)
		scsi_probe_target(mpath->sc_scsibus, target);
	else if (addxsh)
		scsi_xsh_add(&p->p_xsh);

	return (0);
}

/*
 * Detach a path from its device, moving the round-robin cursor off it
 * and, if commands are still queued, kicking another path to keep the
 * queue draining.
 */
int
mpath_path_detach(struct mpath_path *p)
{
	struct mpath_dev *d = p->p_dev;
	struct mpath_path *np = NULL;

#ifdef DIAGNOSTIC
	if (d == NULL)
		panic("mpath: detaching a path from a nonexistant bus");
#endif
	p->p_dev = NULL;

	mtx_enter(&d->d_mtx);
	TAILQ_REMOVE(&d->d_paths, p, p_entry);
	if (d->d_next_path == p)
		d->d_next_path = TAILQ_FIRST(&d->d_paths);

	d->d_path_count--;
	if (!SIMPLEQ_EMPTY(&d->d_ccbs))
		np = d->d_next_path;
	mtx_leave(&d->d_mtx);

	scsi_xsh_del(&p->p_xsh);

	if (np != NULL)
		scsi_xsh_add(&np->p_xsh);

	return (0);
}

/* iopool backend: allocate a ccb (PR_NOWAIT, may return NULL) */
void *
mpath_ccb_get(void *cookie)
{
	struct mpath_softc *sc = cookie;

	return (pool_get(&sc->sc_ccb_pool, PR_NOWAIT));
}

/* iopool backend: return a ccb to the pool */
void
mpath_ccb_put(void *cookie, void *io)
{
	struct mpath_softc *sc = cookie;

	pool_put(&sc->sc_ccb_pool, io);
}

/*
 * Boot device resolution: if dev is the softc of some path's link,
 * return the softc of the corresponding device on the mpath bus so the
 * system roots on the multipathed device instead of one raw path.
 * Returns dev unchanged when mpath is not attached or dev is not a path.
 */
struct device *
mpath_bootdv(struct device *dev)
{
	struct mpath_dev *d;
	struct mpath_path *p;
	int target;

	if (mpath == NULL)
		return (dev);

	for (target = 0; target < MPATH_BUSWIDTH; target++) {
		if ((d = mpath_devs[target]) == NULL)
			continue;

		TAILQ_FOREACH(p, &d->d_paths, p_entry) {
			if (p->p_link->device_softc == dev) {
				return (scsi_get_link(mpath->sc_scsibus,
				    target, 0)->device_softc);
			}
		}
	}

	return (dev);
}