/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include "ring.h"
#include "../grant_table.h"

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */

#ifndef blkif_vdev_t
#define blkif_vdev_t uint16_t
#endif
#define blkif_sector_t uint64_t

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER     2
/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info. A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate. The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes.
 * If a backend does not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not*
 * create the "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(blkif_sring_t) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

struct blkif_request_segment {
    grant_ref_t gref;        /* reference to I/O buffer frame        */
    /* @first_sect: first sector in frame to transfer (inclusive).   */
    /* @last_sect: last sector in frame to transfer (inclusive).     */
    uint8_t     first_sect, last_sect;
};

struct blkif_request {
    uint8_t        operation;    /* BLKIF_OP_???                         */
    uint8_t        nr_segments;  /* number of segments                   */
    blkif_vdev_t   handle;       /* only for read/write requests         */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;

struct blkif_response {
    uint64_t        id;          /* copied from request                  */
    uint8_t         operation;   /* copied from request                  */
    int16_t         status;      /* BLKIF_RSP_???                        */
};
typedef struct blkif_response blkif_response_t;

/*
 * STATUS RETURN CODES.
 */
/* Operation not supported (only happens on barrier and flush-cache requests). */
#define BLKIF_RSP_EOPNOTSUPP  -2
/* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
/* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

/*
 * Generate blkif ring structures and types.
 */

DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);

#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
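
/*
 * Illustrative sketch only -- not part of the interface, and guarded out so
 * it is never compiled.  A minimal front-end request path using the ring
 * macros from ring.h, assuming the shared page "sring" has already been
 * granted to the backend, "gref" is a grant reference covering one page of
 * I/O buffer, and notify_backend() is a hypothetical stand-in for the
 * event-channel notification.
 */
#if 0
extern void notify_backend(void);   /* hypothetical event-channel kick */

/* One-time setup: initialise the shared ring and the private front ring. */
static void example_front_init(blkif_sring_t *sring, blkif_front_ring_t *front)
{
    SHARED_RING_INIT(sring);
    FRONT_RING_INIT(front, sring, 4096 /* one page */);
}

/* Enqueue a single-segment read of eight 512-byte sectors. */
static void example_submit_read(blkif_front_ring_t *front, blkif_vdev_t handle,
                                grant_ref_t gref, blkif_sector_t sector,
                                uint64_t id)
{
    blkif_request_t *req;
    int notify;

    req = RING_GET_REQUEST(front, front->req_prod_pvt);

    req->operation     = BLKIF_OP_READ;
    req->nr_segments   = 1;
    req->handle        = handle;
    req->id            = id;        /* echoed back in blkif_response_t.id */
    req->sector_number = sector;

    /* A whole 4kB frame: 512-byte sectors 0..7, both inclusive. */
    req->seg[0].gref       = gref;
    req->seg[0].first_sect = 0;
    req->seg[0].last_sect  = 7;

    front->req_prod_pvt++;

    /* Publish the request; notify only if the backend asked to be woken. */
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(front, notify);
    if ( notify )
        notify_backend();
}
#endif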
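
/*
 * Illustrative sketch only -- not part of the interface, and guarded out so
 * it is never compiled.  A minimal back-end response path showing the
 * req_event hold-off handshake described at the top of this header:
 * RING_FINAL_CHECK_FOR_REQUESTS() re-arms req_event and re-checks the ring
 * so that a request which raced with the final pass is not missed.  Here
 * handle_request() and notify_frontend() are hypothetical placeholders for
 * the real I/O submission and event-channel code, and xen_rmb() stands for
 * the environment's read memory barrier.
 */
#if 0
extern int16_t handle_request(const blkif_request_t *req);  /* hypothetical */
extern void    notify_frontend(void);                       /* hypothetical */

static void example_backend_work(blkif_back_ring_t *back)
{
    int more_to_do, notify;

    do {
        while ( RING_HAS_UNCONSUMED_REQUESTS(back) )
        {
            blkif_request_t   req;
            blkif_response_t *rsp;

            /* Order the payload read after seeing the producer index. */
            xen_rmb();
            req = *RING_GET_REQUEST(back, back->req_cons);
            back->req_cons++;

            rsp = RING_GET_RESPONSE(back, back->rsp_prod_pvt);
            rsp->id        = req.id;                /* echo private value  */
            rsp->operation = req.operation;
            rsp->status    = handle_request(&req);  /* e.g. BLKIF_RSP_OKAY */
            back->rsp_prod_pvt++;
        }

        /* Publish responses; notify only if the frontend asked to be woken. */
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(back, notify);
        if ( notify )
            notify_frontend();

        /* Re-arm req_event and make a final check for late requests. */
        RING_FINAL_CHECK_FOR_REQUESTS(back, more_to_do);
    } while ( more_to_do );
}
#endif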