/*	$NetBSD: mmemcard.c,v 1.2 2002/12/06 16:03:52 itohy Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by ITOH Yasufumi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <dreamcast/dev/maple/maple.h>
#include <dreamcast/dev/maple/mapleconf.h>

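/*
 * 1012 = 255*4 - 8: a maple frame carries at most 255 longwords of data,
 * and the 8-byte read/write request header leaves 1012 bytes for payload.
 */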
#define MMEM_MAXACCSIZE	1012	/* (255*4) - 8  =  253*32 / 8 */

struct mmem_funcdef {	/* XXX assuming little-endian structure packing */
	unsigned unused	: 8,
		 ra	: 4,	/* number of accesses per read */
		 wa	: 4,	/* number of accesses per write */
		 bb	: 8,	/* block size / 32 - 1 */
		 pt	: 8;	/* number of partitions - 1 */
};

struct mmem_request_read_data {
	u_int32_t	func_code;
	u_int8_t	pt;
	u_int8_t	phase;
	u_int16_t	block;
};

struct mmem_response_read_data {
	u_int32_t	func_code;	/* function code (big endian) */
	u_int32_t	blkno;		/* 512-byte block number (big endian) */
	u_int8_t	data[MMEM_MAXACCSIZE];
};

struct mmem_request_write_data {
	u_int32_t	func_code;
	u_int8_t	pt;
	u_int8_t	phase;		/* 0, 1, 2, 3: one per 128-byte chunk */
	u_int16_t	block;
	u_int8_t	data[MMEM_MAXACCSIZE];
};
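/* bytes in one write request: one access worth of data plus the 8-byte header */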
#define MMEM_SIZE_REQW(sc)	((sc)->sc_waccsz + 8)

struct mmem_request_get_media_info {
	u_int32_t	func_code;
	u_int32_t	pt;		/* pt (1 byte) and 3 unused bytes */
};

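/*
 * Per-partition geometry returned by MAPLE_COMMAND_GETMINFO: the block
 * range of the partition and the layout (in blocks) of its on-card
 * filesystem areas (info block, FAT, directory, data).
 */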
struct mmem_media_info {
	u_int16_t	maxblk, minblk;
	u_int16_t	infpos;
	u_int16_t	fatpos, fatsz;
	u_int16_t	dirpos, dirsz;
	u_int16_t	icon;
	u_int16_t	datasz;
	u_int16_t	rsvd[3];
};

struct mmem_response_media_info {
	u_int32_t	func_code;	/* function code (big endian) */
	struct mmem_media_info info;
};

struct mmem_softc {
	struct device	sc_dev;

	struct device	*sc_parent;
	struct maple_unit *sc_unit;
	struct maple_devinfo *sc_devinfo;

	enum mmem_stat {
		MMEM_INIT,	/* during init: probing partition geometry */
		MMEM_INIT2,	/* during init: dummy read */
		MMEM_IDLE,	/* init done, not in I/O */
		MMEM_READ,	/* in read operation */
		MMEM_WRITE1,	/* in write operation (read and compare) */
		MMEM_WRITE2,	/* in write operation (write) */
		MMEM_DETACH	/* detaching */
	} sc_stat;

	int		sc_npt;		/* number of partitions */
	int		sc_bsize;	/* block size */
	int		sc_wacc;	/* number of write accesses per block */
	int		sc_waccsz;	/* size of a write access */
	int		sc_racc;	/* number of read accesses per block */
	int		sc_raccsz;	/* size of a read access */

	struct mmem_pt {
		int		pt_flags;
#define MMEM_PT_OK	1	/* partition is alive */
		struct disk	pt_dk;		/* disk(9) */
		struct mmem_media_info pt_info;	/* geometry per part */

		char		pt_name[16 /* see device.h */ + 4 /* ".255" */];
	} *sc_pt;

	/* write request buffer (only one is used at a time) */
	union {
		struct mmem_request_read_data req_read;
		struct mmem_request_write_data req_write;
		struct mmem_request_get_media_info req_minfo;
	} sc_req;
#define sc_reqr	sc_req.req_read
#define sc_reqw	sc_req.req_write
#define sc_reqm	sc_req.req_minfo

	/* pending buffers */
	struct bufq_state sc_q;

	/* current I/O access */
	struct buf	*sc_bp;
	int		sc_cnt;
	char		*sc_iobuf;
	int		sc_retry;
#define MMEM_MAXRETRY	12
};

/*
 * minor number layout (mmemdetach() depends on this layout):
 *
 * 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
 * |---------------------| |---------------------| |---------|
 *          unit                    part           disklabel partition
 */
#define MMEM_PART(diskunit)	((diskunit) & 0xff)
#define MMEM_UNIT(diskunit)	((diskunit) >> 8)
#define MMEM_DISKMINOR(unit, part, disklabel_partition) \
	DISKMINOR(((unit) << 8) | (part), (disklabel_partition))
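/*
 * e.g. unit 1, card partition 2, disklabel partition 0 ("a") gives
 * MMEM_DISKMINOR(1, 2, 0) == DISKMINOR((1 << 8) | 2, 0).
 */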

static int	mmemmatch __P((struct device *, struct cfdata *, void *));
static void	mmemattach __P((struct device *, struct device *, void *));
static void	mmem_defaultlabel __P((struct mmem_softc *, struct mmem_pt *,
		    struct disklabel *));
static int	mmemdetach __P((struct device *, int));
static void	mmem_intr __P((void *, struct maple_response *, int, int));
static void	mmem_printerror __P((const char *, int, int, u_int32_t));
static void	mmemstart __P((struct mmem_softc *));
static void	mmemstart_bp __P((struct mmem_softc *));
static void	mmemstart_write2 __P((struct mmem_softc *));
static void	mmemdone __P((struct mmem_softc *, struct mmem_pt *, int));

dev_type_open(mmemopen);
dev_type_close(mmemclose);
dev_type_read(mmemread);
dev_type_write(mmemwrite);
dev_type_ioctl(mmemioctl);
dev_type_strategy(mmemstrategy);

const struct bdevsw mmem_bdevsw = {
	mmemopen, mmemclose, mmemstrategy, mmemioctl, nodump,
	nosize, D_DISK
};

const struct cdevsw mmem_cdevsw = {
	mmemopen, mmemclose, mmemread, mmemwrite, mmemioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

CFATTACH_DECL(mmem, sizeof(struct mmem_softc),
    mmemmatch, mmemattach, mmemdetach, NULL);

extern struct cfdriver mmem_cd;

struct dkdriver mmemdkdriver = { mmemstrategy };

static int
mmemmatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct maple_attach_args *ma = aux;

	return (ma->ma_function == MAPLE_FN_MEMCARD ? MAPLE_MATCH_FUNC : 0);
}

static void
mmemattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct mmem_softc *sc = (void *) self;
	struct maple_attach_args *ma = aux;
	int i;
	union {
		u_int32_t v;
		struct mmem_funcdef s;
	} funcdef;

	sc->sc_parent = parent;
	sc->sc_unit = ma->ma_unit;
	sc->sc_devinfo = ma->ma_devinfo;

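	/*
	 * Decode the function_data word: number of partitions, block size,
	 * and how many accesses a read or write is split into.
	 */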
	funcdef.v = maple_get_function_data(ma->ma_devinfo, MAPLE_FN_MEMCARD);
	printf(": Memory card\n");
	printf("%s: %d part, %d bytes/block, ",
	    sc->sc_dev.dv_xname,
	    sc->sc_npt = funcdef.s.pt + 1,
	    sc->sc_bsize = (funcdef.s.bb + 1)  << 5);
	if ((sc->sc_wacc = funcdef.s.wa) == 0)
		printf("no write, ");
	else
		printf("%d acc/write, ", sc->sc_wacc);
	if ((sc->sc_racc = funcdef.s.ra) == 0)
		printf("no read\n");
	else
		printf("%d acc/read\n", sc->sc_racc);

	/*
	 * start init sequence
	 */
	sc->sc_stat = MMEM_INIT;
	bufq_alloc(&sc->sc_q, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);

	/* check consistency */
	if (sc->sc_wacc != 0) {
		sc->sc_waccsz = sc->sc_bsize / sc->sc_wacc;
		if (sc->sc_bsize != sc->sc_waccsz * sc->sc_wacc) {
			printf("%s: write access isn't equally divided\n",
			    sc->sc_dev.dv_xname);
			sc->sc_wacc = 0;	/* no write */
		} else if (sc->sc_waccsz > MMEM_MAXACCSIZE) {
			printf("%s: write access size is too large\n",
			    sc->sc_dev.dv_xname);
			sc->sc_wacc = 0;	/* no write */
		}
	}
	if (sc->sc_racc != 0) {
		sc->sc_raccsz = sc->sc_bsize / sc->sc_racc;
		if (sc->sc_bsize != sc->sc_raccsz * sc->sc_racc) {
			printf("%s: read access isn't equally divided\n",
			    sc->sc_dev.dv_xname);
			sc->sc_racc = 0;	/* no read */
		} else if (sc->sc_raccsz > MMEM_MAXACCSIZE) {
			printf("%s: read access size is too large\n",
			    sc->sc_dev.dv_xname);
			sc->sc_racc = 0;	/* no read */
		}
	}
	if (sc->sc_wacc == 0 && sc->sc_racc == 0) {
		printf("%s: device supports neither read nor write\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* per-part structure */
	sc->sc_pt = malloc(sizeof(struct mmem_pt) * sc->sc_npt, M_DEVBUF,
	    M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_npt; i++) {
		sprintf(sc->sc_pt[i].pt_name, "%s.%d", sc->sc_dev.dv_xname, i);
	}

	maple_set_callback(parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    mmem_intr, sc);

	/*
	 * get capacity (start from partition 0)
	 */
	sc->sc_reqm.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqm.pt = 0;
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_GETMINFO, sizeof sc->sc_reqm / 4, &sc->sc_reqm, 0);
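	/* initialization continues in mmem_intr() as GETMINFO responses arrive */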
}

static int
mmemdetach(self, flags)
	struct device *self;
	int flags;
{
	struct mmem_softc *sc = (struct mmem_softc *) self;
	struct buf *bp;
	int i;
	int minor_l, minor_h;

	sc->sc_stat = MMEM_DETACH;	/* just in case */

	/*
	 * kill pending I/O
	 */
	if ((bp = sc->sc_bp) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	while ((bp = BUFQ_GET(&sc->sc_q)) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_free(&sc->sc_q);

	/*
	 * revoke vnodes
	 */
#ifdef __HAVE_OLD_DISKLABEL
 #error This code assumes DISKUNIT() is contiguous in minor number.
#endif
	minor_l = MMEM_DISKMINOR(self->dv_unit, 0, 0);
	minor_h = MMEM_DISKMINOR(self->dv_unit, sc->sc_npt - 1,
	    MAXPARTITIONS - 1);
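	/* all minors for this unit fall in [minor_l, minor_h]; see layout above */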
	vdevgone(bdevsw_lookup_major(&mmem_bdevsw), minor_l, minor_h, VBLK);
	vdevgone(cdevsw_lookup_major(&mmem_cdevsw), minor_l, minor_h, VCHR);

	/*
	 * free per-partition structure
	 */
	if (sc->sc_pt) {
		/*
		 * detach disks
		 */
		for (i = 0; i < sc->sc_npt; i++) {
			if (sc->sc_pt[i].pt_flags & MMEM_PT_OK)
				disk_detach(&sc->sc_pt[i].pt_dk);
		}
		free(sc->sc_pt, M_DEVBUF);
	}

	return 0;
}

/* fake disklabel */
static void
mmem_defaultlabel(sc, pt, d)
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	struct disklabel *d;
{

	bzero(d, sizeof *d);

#if 0
	d->d_type = DTYPE_FLOPPY;		/* XXX? */
#endif
	strncpy(d->d_typename, sc->sc_devinfo->di_product_name,
	    sizeof d->d_typename);
	strcpy(d->d_packname, "fictitious");
	d->d_secsize = sc->sc_bsize;
	d->d_ntracks = 1;			/* XXX */
	d->d_nsectors = d->d_secpercyl = 8;	/* XXX */
	d->d_secperunit = pt->pt_info.maxblk - pt->pt_info.minblk + 1;
	d->d_ncylinders = d->d_secperunit / d->d_secpercyl;
	d->d_rpm = 1;				/* when 4 acc/write */

	d->d_npartitions = RAW_PART + 1;
	d->d_partitions[RAW_PART].p_size = d->d_secperunit;

	d->d_magic = d->d_magic2 = DISKMAGIC;
	d->d_checksum = dkcksum(d);
}

/*
 * called back from maple bus driver
 */
static void
mmem_intr(dev, response, sz, flags)
	void *dev;
	struct maple_response *response;
	int sz, flags;
{
	struct mmem_softc *sc = dev;
	struct mmem_response_read_data *r = (void *) response->data;
	struct mmem_response_media_info *rm = (void *) response->data;
	struct buf *bp;
	int part;
	struct mmem_pt *pt;
	char pbuf[9];
	int off;

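	/*
	 * A block is transferred in sc_racc read phases or sc_wacc write
	 * phases.  A write is done as read-and-compare (MMEM_WRITE1), then
	 * an actual write (MMEM_WRITE2) only if the data differ, followed
	 * by another read-and-compare to verify.
	 */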
	switch (sc->sc_stat) {
	case MMEM_INIT:
		/* checking part geometry */
		part = sc->sc_reqm.pt;
		pt = &sc->sc_pt[part];
		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:
			pt->pt_info = rm->info;
			format_bytes(pbuf, sizeof(pbuf),
			    (u_int64_t)
				((pt->pt_info.maxblk - pt->pt_info.minblk + 1)
				 * sc->sc_bsize));
			printf("%s: %s, blk %d %d, inf %d, fat %d %d, dir %d %d, icon %d, data %d\n",
			    pt->pt_name,
			    pbuf,
			    pt->pt_info.maxblk, pt->pt_info.minblk,
			    pt->pt_info.infpos,
			    pt->pt_info.fatpos, pt->pt_info.fatsz,
			    pt->pt_info.dirpos, pt->pt_info.dirsz,
			    pt->pt_info.icon,
			    pt->pt_info.datasz);

			pt->pt_dk.dk_driver = &mmemdkdriver;
			pt->pt_dk.dk_name = pt->pt_name;
			disk_attach(&pt->pt_dk);

			mmem_defaultlabel(sc, pt, pt->pt_dk.dk_label);

			/* this partition is active */
			pt->pt_flags = MMEM_PT_OK;

			break;
		default:
			printf("%s: init: unexpected response %#x, sz %d\n",
			    pt->pt_name, ntohl(response->response_code), sz);
			break;
		}
		if (++part == sc->sc_npt) {
#if 1
			/*
			 * XXX Read a block and discard the contents (only to
			 * turn off the access indicator on Visual Memory).
			 */
			pt = &sc->sc_pt[0];
			sc->sc_reqr.func_code =
			    htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
			sc->sc_reqr.pt = 0;
			sc->sc_reqr.block = htons(pt->pt_info.minblk);
			sc->sc_reqr.phase = 0;
			maple_command(sc->sc_parent, sc->sc_unit,
			    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
			    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			sc->sc_stat = MMEM_INIT2;
#else
			sc->sc_stat = MMEM_IDLE;	/* init done */
#endif
		} else {
			sc->sc_reqm.pt = part;
			maple_command(sc->sc_parent, sc->sc_unit,
			    MAPLE_FN_MEMCARD, MAPLE_COMMAND_GETMINFO,
			    sizeof sc->sc_reqm / 4, &sc->sc_reqm, 0);
		}
		break;

	case MMEM_INIT2:
		/* XXX just discard */
		sc->sc_stat = MMEM_IDLE;	/* init done */
		break;

	case MMEM_READ:
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:		/* read done */
			off = sc->sc_raccsz * sc->sc_reqr.phase;
			bcopy(r->data + off, sc->sc_iobuf + off, sc->sc_raccsz);

			if (++sc->sc_reqr.phase == sc->sc_racc) {
				/* all phase done */
				pt = &sc->sc_pt[sc->sc_reqr.pt];
				mmemdone(sc, pt, 0);
			} else {
				/* go next phase */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
				    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    1, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_bp(sc);		/* retry */
			break;
		default:
			printf("%s: read: unexpected response %#x %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_bp(sc);		/* retry */
			break;
		}
		break;

	case MMEM_WRITE1:	/* read before write / verify after write */
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:		/* read done */
			off = sc->sc_raccsz * sc->sc_reqr.phase;
			if (bcmp(r->data + off, sc->sc_iobuf + off,
			    sc->sc_raccsz)) {
				/*
				 * data differ, start writing
				 */
				mmemstart_write2(sc);
			} else if (++sc->sc_reqr.phase == sc->sc_racc) {
				/*
				 * all phase done and compared equal
				 */
				pt = &sc->sc_pt[sc->sc_reqr.pt];
				mmemdone(sc, pt, 0);
			} else {
				/* go next phase */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
				    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    1, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_write2(sc);	/* start writing */
			break;
		default:
			printf("%s: verify: unexpected response %#x %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_write2(sc);	/* start writing */
			break;
		}
		break;

	case MMEM_WRITE2:	/* write */
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_OK:			/* write done */
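			/*
			 * sc_reqw.phase counts the data phases already
			 * written; once it equals sc_wacc, the response
			 * being handled is for the final GETLASTERR and
			 * the block is re-read to verify it.
			 */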
			if (sc->sc_reqw.phase == sc->sc_wacc) {
				/* all phase done */
				mmemstart_bp(sc);	/* start verify */
			} else if (++sc->sc_reqw.phase == sc->sc_wacc) {
				/* check error */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_GETLASTERR,
				    2 /* no data */ , &sc->sc_reqw,
				    MAPLE_FLAG_CMD_PERIODIC_TIMING);
			} else {
				/* go next phase */
				bcopy(sc->sc_iobuf
					+ sc->sc_waccsz * sc->sc_reqw.phase,
				    sc->sc_reqw.data, sc->sc_waccsz);
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BWRITE,
				    MMEM_SIZE_REQW(sc) / 4, &sc->sc_reqw,
				    MAPLE_FLAG_CMD_PERIODIC_TIMING);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqw.pt].pt_name,
			    0, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_write2(sc);	/* retry writing */
			break;
		default:
			printf("%s: write: unexpected response %#x, %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqw.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_write2(sc);	/* retry writing */
			break;
		}
		break;

	default:
		break;
	}
}

static void
mmem_printerror(head, rd, blk, code)
	const char *head;
	int rd;		/* 1: read, 0: write */
	int blk;
	u_int32_t code;
{

	printf("%s: error %sing blk %d:", head, rd? "read" : "writ", blk);
	NTOHL(code);
	if (code & 1)
		printf(" PT error");
	if (code & 2)
		printf(" Phase error");
	if (code & 4)
		printf(" Block error");
	if (code & 010)
		printf(" Write error");
	if (code & 020)
		printf(" Length error");
	if (code & 040)
		printf(" CRC error");
	if (code & ~077)
		printf(" Unknown error %#x", code & ~077);
	printf("\n");
}

int
mmemopen(dev, flags, devtype, p)
	dev_t dev;
	int flags, devtype;
	struct proc *p;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	labelpart = DISKPART(dev);
	if ((sc = device_lookup(&mmem_cd, unit)) == NULL
	    || sc->sc_stat == MMEM_INIT
	    || sc->sc_stat == MMEM_INIT2
	    || part >= sc->sc_npt || (pt = &sc->sc_pt[part])->pt_flags == 0)
		return ENXIO;

	switch (devtype) {
	case S_IFCHR:
		pt->pt_dk.dk_copenmask |= (1 << labelpart);
		break;
	case S_IFBLK:
		pt->pt_dk.dk_bopenmask |= (1 << labelpart);
		break;
	}

	return 0;
}

int
mmemclose(dev, flags, devtype, p)
	dev_t dev;
	int flags, devtype;
	struct proc *p;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	sc = mmem_cd.cd_devs[unit];
	pt = &sc->sc_pt[part];
	labelpart = DISKPART(dev);

	switch (devtype) {
	case S_IFCHR:
		pt->pt_dk.dk_copenmask &= ~(1 << labelpart);
		break;
	case S_IFBLK:
		pt->pt_dk.dk_bopenmask &= ~(1 << labelpart);
		break;
	}

	return 0;
}

void
mmemstrategy(bp)
	struct buf *bp;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	daddr_t off, nblk, cnt;

	diskunit = DISKUNIT(bp->b_dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	if ((sc = device_lookup(&mmem_cd, unit)) == NULL
	    || sc->sc_stat == MMEM_INIT
	    || sc->sc_stat == MMEM_INIT2
	    || part >= sc->sc_npt || (pt = &sc->sc_pt[part])->pt_flags == 0)
		goto inval;

#if 0
	printf("%s: mmemstrategy: blkno %d, count %ld\n",
	    pt->pt_name, bp->b_blkno, bp->b_bcount);
#endif

	if (bp->b_flags & B_READ) {
		if (sc->sc_racc == 0)
			goto inval;		/* no read */
	} else if (sc->sc_wacc == 0) {
		bp->b_error = EROFS;		/* no write */
		goto bad;
	}

	if (bp->b_blkno & ~(~(daddr_t)0 >> (DEV_BSHIFT + 1 /* sign bit */))
	    || (bp->b_bcount % sc->sc_bsize) != 0)
		goto inval;

	cnt = howmany(bp->b_bcount, sc->sc_bsize);
	if (cnt == 0)
		goto done;	/* no work */

	off = bp->b_blkno * DEV_BSIZE / sc->sc_bsize;

	/* offset to disklabel partition */
	labelpart = DISKPART(bp->b_dev);
	if (labelpart == RAW_PART) {
		nblk = pt->pt_info.maxblk - pt->pt_info.minblk + 1;
	} else {
		off +=
		    nblk = pt->pt_dk.dk_label->d_partitions[labelpart].p_offset;
		nblk += pt->pt_dk.dk_label->d_partitions[labelpart].p_size;
	}
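	/*
	 * off is now a block number within the card partition, and nblk is
	 * the first block past the end of the area being addressed.
	 */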

	/* deal with the EOF condition */
	if (off + cnt > nblk) {
		if (off >= nblk) {
			if (off == nblk)
				goto done;
			goto inval;
		}
		cnt = nblk - off;
		bp->b_resid = bp->b_bcount - (cnt * sc->sc_bsize);
	}

	bp->b_rawblkno = off;

	/* queue this transfer */
	BUFQ_PUT(&sc->sc_q, bp);

	if (sc->sc_stat == MMEM_IDLE)
		mmemstart(sc);

	return;

inval:	bp->b_error = EINVAL;
bad:	bp->b_flags |= B_ERROR;
done:	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

/*
 * start I/O operations
 */
static void
mmemstart(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	struct mmem_pt *pt;
	int s;

	if ((bp = BUFQ_GET(&sc->sc_q)) == NULL) {
		sc->sc_stat = MMEM_IDLE;
		maple_enable_unit_ping(sc->sc_parent, sc->sc_unit,
		    MAPLE_FN_MEMCARD, 1);
		return;
	}

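	/* set up the new transfer: sc_cnt blocks to go, sc_iobuf tracks progress */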
	sc->sc_bp = bp;
	sc->sc_cnt = howmany(bp->b_bcount - bp->b_resid, sc->sc_bsize);
	KASSERT(sc->sc_cnt);
	sc->sc_iobuf = bp->b_data;
	sc->sc_retry = 0;

	pt = &sc->sc_pt[MMEM_PART(DISKUNIT(bp->b_dev))];
	s = splbio();
	disk_busy(&pt->pt_dk);
	splx(s);

	/*
	 * I/O will fail if the maple driver's removal detection (periodic
	 * ping) runs before the transfer finishes, so disable it here.
	 * Removal is still noticed while we are sending commands.
	 */
	maple_enable_unit_ping(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD, 0);

	mmemstart_bp(sc);
}

/*
 * start/retry a specified I/O operation
 */
static void
mmemstart_bp(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	int diskunit, part;
	struct mmem_pt *pt;

	bp = sc->sc_bp;
	diskunit = DISKUNIT(bp->b_dev);
	part = MMEM_PART(diskunit);
	pt = &sc->sc_pt[part];

	/* handle retry */
	if (sc->sc_retry++ > MMEM_MAXRETRY) {
		/* retry count exceeded */
		mmemdone(sc, pt, EIO);
		return;
	}

	/*
	 * Start the first phase (phase# = 0).
	 */
	/* start the read (for a write, this is the read-and-compare pass) */
	sc->sc_stat = (bp->b_flags & B_READ) ? MMEM_READ : MMEM_WRITE1;
	sc->sc_reqr.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqr.pt = part;
	sc->sc_reqr.block = htons(bp->b_rawblkno);
	sc->sc_reqr.phase = 0;		/* first phase */
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_BREAD, sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
}

static void
mmemstart_write2(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	int diskunit, part;
	struct mmem_pt *pt;

	bp = sc->sc_bp;
	diskunit = DISKUNIT(bp->b_dev);
	part = MMEM_PART(diskunit);
	pt = &sc->sc_pt[part];

	/* handle retry */
	if (sc->sc_retry++ > MMEM_MAXRETRY - 2 /* spare for verify read */) {
		/* retry count exceeded */
		mmemdone(sc, pt, EIO);
		return;
	}

	/*
	 * Start the first phase (phase# = 0).
	 */
	/* start write */
	sc->sc_stat = MMEM_WRITE2;
	sc->sc_reqw.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqw.pt = part;
	sc->sc_reqw.block = htons(bp->b_rawblkno);
	sc->sc_reqw.phase = 0;		/* first phase */
	bcopy(sc->sc_iobuf /* + sc->sc_waccsz * phase */,
	    sc->sc_reqw.data, sc->sc_waccsz);
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_BWRITE, MMEM_SIZE_REQW(sc) / 4, &sc->sc_reqw,
	    MAPLE_FLAG_CMD_PERIODIC_TIMING);
}

static void
mmemdone(sc, pt, err)
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	int err;
{
	struct buf *bp = sc->sc_bp;
	int s;
	int bcnt;

	KASSERT(bp);

	if (err) {
		bcnt = sc->sc_iobuf - bp->b_data;
		bp->b_resid = bp->b_bcount - bcnt;

		/* flag an error only if no block was transferred */
		if (bcnt == 0) {
			bp->b_error = err;
			bp->b_flags |= B_ERROR;
		}
		goto term_xfer;
	}

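	/*
	 * The block completed: advance the buffer pointer.  (The error path
	 * above jumps directly to term_xfer inside the block below.)
	 */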
	sc->sc_iobuf += sc->sc_bsize;
	if (--sc->sc_cnt == 0) {
	term_xfer:
		/* terminate current transfer */
		sc->sc_bp = NULL;
		s = splbio();
		disk_unbusy(&pt->pt_dk, sc->sc_iobuf - bp->b_data,
		    sc->sc_stat == MMEM_READ);
		biodone(bp);
		splx(s);

		/* go next transfer */
		mmemstart(sc);
	} else {
		/* go next block */
		bp->b_rawblkno++;
		sc->sc_retry = 0;
		mmemstart_bp(sc);
	}
}

int
mmemread(dev, uio, flags)
	dev_t	dev;
	struct	uio *uio;
	int	flags;
{

	return (physio(mmemstrategy, NULL, dev, B_READ, minphys, uio));
}

int
mmemwrite(dev, uio, flags)
	dev_t	dev;
	struct	uio *uio;
	int	flags;
{

	return (physio(mmemstrategy, NULL, dev, B_WRITE, minphys, uio));
}

int
mmemioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int diskunit, unit, part;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	sc = mmem_cd.cd_devs[unit];
	pt = &sc->sc_pt[part];

	switch (cmd) {
	case DIOCGDINFO:
		*(struct disklabel *)data = *pt->pt_dk.dk_label; /* XXX */
		break;

	default:
		/* generic maple ioctl */
		return maple_unit_ioctl(sc->sc_parent, sc->sc_unit, cmd, data,
		    flag, p);
	}

	return 0;
}
979