/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define	XBD_SEGS_TO_SIZE(segs)						\
	(((segs) - 1) * PAGE_SIZE)
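
/*
 * Worked example (a sketch, assuming the common 4KiB PAGE_SIZE): a
 * native blkif request carries at most BLKIF_MAX_SEGMENTS_PER_REQUEST
 * (11) segments, so XBD_SEGS_TO_SIZE(11) = (11 - 1) * 4096 = 40KiB is
 * the largest transfer guaranteed to fit even when misaligned.
 */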

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define	XBD_SIZE_TO_SEGS(size)						\
	(((size) / PAGE_SIZE) + 1)
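
/*
 * The two macros above are exact inverses: expanding one inside the
 * other gives XBD_SIZE_TO_SEGS(XBD_SEGS_TO_SIZE(segs))
 * == ((((segs) - 1) * PAGE_SIZE) / PAGE_SIZE) + 1 == segs.
 */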

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES		32

/**
 * The maximum number of outstanding requests we will allow in a negotiated
 * block-front/back communication channel.
 */
#define XBD_MAX_REQUESTS						\
	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)
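
/*
 * Sizing note (an assumption drawn from the Xen shared-ring headers,
 * not from this file): __CONST_RING_SIZE() counts how many
 * request/response unions fit in the given ring space and rounds the
 * count down to a power of two, so with 4KiB pages the 32 ring pages
 * above yield on the order of 1024 request slots.
 */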

/**
 * The maximum number of blkif segments which can be provided per indirect
 * page in an indirect request.
 */
#define XBD_MAX_SEGMENTS_PER_PAGE					\
	(PAGE_SIZE / sizeof(struct blkif_request_segment))

/**
 * The maximum number of blkif segments which can be provided in an indirect
 * request.
 */
#define XBD_MAX_INDIRECT_SEGMENTS					\
	(BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * XBD_MAX_SEGMENTS_PER_PAGE)
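
/*
 * Worked example (a sketch, assuming 4KiB pages and the 8-byte
 * struct blkif_request_segment of the current blkif interface): each
 * indirect page holds 4096 / 8 = 512 segments, and with
 * BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST (8) indirect pages a single
 * request can name 8 * 512 = 4096 segments, i.e. a 16MiB I/O.
 */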

/**
 * Compute the number of indirect segment pages required for an I/O with the
 * specified number of indirect segments.
 */
#define XBD_INDIRECT_SEGS_TO_PAGES(segs)				\
	(((segs) + XBD_MAX_SEGMENTS_PER_PAGE - 1) / XBD_MAX_SEGMENTS_PER_PAGE)
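
/*
 * This is the usual round-up division idiom: with 512 segments per
 * page, for example, XBD_INDIRECT_SEGS_TO_PAGES(513)
 * = (513 + 511) / 512 = 2.
 */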

typedef enum {
	XBDCF_Q_MASK		= 0xFF,
	/* This command has contributed to xbd_qfrozen_cnt. */
	XBDCF_FROZEN		= 1 << 8,
	/* Freeze the command queue on dispatch (i.e. single step command). */
	XBDCF_Q_FREEZE		= 1 << 9,
	/* Bus DMA returned EINPROGRESS for this command. */
	XBDCF_ASYNC_MAPPING	= 1 << 10,
	XBDCF_INITIALIZER	= XBDCF_Q_MASK
} xbdc_flag_t;
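
/*
 * The low byte of cm_flags (XBDCF_Q_MASK) records the xbd_q_index_t of
 * the queue a command currently sits on; the queueing helpers below
 * keep it up to date.  XBDCF_INITIALIZER therefore starts a command
 * with XBD_Q_NONE in that byte, i.e. on no queue at all.
 */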

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
	TAILQ_ENTRY(xbd_command) cm_link;
	struct xbd_softc	*cm_sc;
	xbdc_flag_t		 cm_flags;
	bus_dmamap_t		 cm_map;
	uint64_t		 cm_id;
	grant_ref_t		*cm_sg_refs;
	struct bio		*cm_bp;
	grant_ref_t		 cm_gref_head;
	void			*cm_data;
	size_t			 cm_datalen;
	u_int			 cm_nseg;
	int			 cm_operation;
	blkif_sector_t		 cm_sector_number;
	int			 cm_status;
	xbd_cbcf_t		*cm_complete;
	void			*cm_indirectionpages;
	grant_ref_t		 cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
};

typedef enum {
	XBD_Q_FREE,
	XBD_Q_READY,
	XBD_Q_BUSY,
	XBD_Q_COMPLETE,
	XBD_Q_BIO,
	XBD_Q_COUNT,
	XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;

typedef struct xbd_cm_q {
	TAILQ_HEAD(, xbd_command) q_tailq;
	uint32_t		  q_length;
	uint32_t		  q_max;
} xbd_cm_q_t;

typedef enum {
	XBD_STATE_DISCONNECTED,
	XBD_STATE_CONNECTED,
	XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
	XBDF_NONE	  = 0,
	XBDF_OPEN	  = 1 << 0, /* drive is open (can't shut down) */
	XBDF_BARRIER	  = 1 << 1, /* backend supports barriers */
	XBDF_FLUSH	  = 1 << 2, /* backend supports flush */
	XBDF_READY	  = 1 << 3, /* device is ready for use */
	XBDF_CM_SHORTAGE  = 1 << 4, /* free cm resource shortage active */
	XBDF_GNT_SHORTAGE = 1 << 5, /* grant ref resource shortage active */
	XBDF_WAIT_IDLE	  = 1 << 6, /*
				    * No new work until outstanding work
				    * completes.
				    */
	XBDF_DISCARD	  = 1 << 7, /* backend supports discard */
	XBDF_PERSISTENT	  = 1 << 8  /* backend supports persistent grants */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
	device_t			 xbd_dev;
	struct disk			*xbd_disk;	/* disk params */
	struct bio_queue_head		 xbd_bioq;	/* sort queue */
	int				 xbd_unit;
	xbd_flag_t			 xbd_flags;
	int				 xbd_qfrozen_cnt;
	int				 xbd_vdevice;
	xbd_state_t			 xbd_state;
	u_int				 xbd_ring_pages;
	uint32_t			 xbd_max_requests;
	uint32_t			 xbd_max_request_segments;
	uint32_t			 xbd_max_request_size;
	uint32_t			 xbd_max_request_indirectpages;
	grant_ref_t			 xbd_ring_ref[XBD_MAX_RING_PAGES];
	blkif_front_ring_t		 xbd_ring;
	xen_intr_handle_t		 xen_intr_handle;
	struct gnttab_free_callback	 xbd_callback;
	xbd_cm_q_t			 xbd_cm_q[XBD_Q_COUNT];
	bus_dma_tag_t			 xbd_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int				 xbd_users;
	struct mtx			 xbd_io_lock;

	struct xbd_command		*xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
			uint16_t vdisk_info, unsigned long sector_size,
			unsigned long phys_sector_size);

static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	cmq->q_length++;
	if (cmq->q_length > cmq->q_max)
		cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
	return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	TAILQ_INIT(&cmq->q_tailq);
	cmq->q_length = 0;
	cmq->q_max = 0;
}

static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_command *cm;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
		if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
			panic("%s: command %p is on queue %d, "
			    "not specified queue %d",
			    __func__, cm,
			    cm->cm_flags & XBDCF_Q_MASK,
			    index);
		}
		TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
		cm->cm_flags &= ~XBDCF_Q_MASK;
		cm->cm_flags |= XBD_Q_NONE;
		xbd_removed_qentry(cm->cm_sc, index);
	}
	return (cm);
}

static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
	xbd_q_index_t index;

	index = cm->cm_flags & XBDCF_Q_MASK;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if (index != expected_index) {
		panic("%s: command %p is on queue %d, not specified queue %d",
		    __func__, cm, index, expected_index);
	}
	TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= XBD_Q_NONE;
	xbd_removed_qentry(cm->cm_sc, index);
}
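
/*
 * Typical command flow (a sketch; the actual state machine lives in
 * the driver proper, not in this header): a command is pulled from
 * XBD_Q_FREE, filled in, staged on XBD_Q_READY, moved to XBD_Q_BUSY
 * while the backend owns it, and finally placed on XBD_Q_COMPLETE or
 * returned to XBD_Q_FREE.  For example:
 *
 *	struct xbd_command *cm;
 *
 *	if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) != NULL) {
 *		cm->cm_bp = bp;
 *		xbd_enqueue_cm(cm, XBD_Q_READY);
 *	}
 */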

static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
	bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
		bioq_remove(&sc->xbd_bioq, bp);
		xbd_removed_qentry(sc, XBD_Q_BIO);
	}
	return (bp);
}
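
/*
 * The bio helpers above mirror the command-queue helpers but operate
 * on the GEOM bio sort queue, accounting its length under the
 * XBD_Q_BIO statistics slot rather than linking commands.
 */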

static inline void
xbd_initqs(struct xbd_softc *sc)
{
	u_int index;

	for (index = 0; index < XBD_Q_COUNT; index++)
		xbd_initq_cm(sc, index);

	xbd_initq_bio(sc);
}

#endif /* __XEN_BLKFRONT_BLOCK_H__ */