1c1bb86cdSEric Blake /* 2c1bb86cdSEric Blake * Block driver for RAW files (posix) 3c1bb86cdSEric Blake * 4c1bb86cdSEric Blake * Copyright (c) 2006 Fabrice Bellard 5c1bb86cdSEric Blake * 6c1bb86cdSEric Blake * Permission is hereby granted, free of charge, to any person obtaining a copy 7c1bb86cdSEric Blake * of this software and associated documentation files (the "Software"), to deal 8c1bb86cdSEric Blake * in the Software without restriction, including without limitation the rights 9c1bb86cdSEric Blake * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10c1bb86cdSEric Blake * copies of the Software, and to permit persons to whom the Software is 11c1bb86cdSEric Blake * furnished to do so, subject to the following conditions: 12c1bb86cdSEric Blake * 13c1bb86cdSEric Blake * The above copyright notice and this permission notice shall be included in 14c1bb86cdSEric Blake * all copies or substantial portions of the Software. 15c1bb86cdSEric Blake * 16c1bb86cdSEric Blake * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17c1bb86cdSEric Blake * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18c1bb86cdSEric Blake * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19c1bb86cdSEric Blake * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20c1bb86cdSEric Blake * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21c1bb86cdSEric Blake * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22c1bb86cdSEric Blake * THE SOFTWARE. 
23c1bb86cdSEric Blake */ 24922a01a0SMarkus Armbruster 25c1bb86cdSEric Blake #include "qemu/osdep.h" 26c1bb86cdSEric Blake #include "qapi/error.h" 27c1bb86cdSEric Blake #include "qemu/cutils.h" 28c1bb86cdSEric Blake #include "qemu/error-report.h" 29e2c1c34fSMarkus Armbruster #include "block/block-io.h" 30c1bb86cdSEric Blake #include "block/block_int.h" 31c1bb86cdSEric Blake #include "qemu/module.h" 32922a01a0SMarkus Armbruster #include "qemu/option.h" 33ffa244c8SKevin Wolf #include "qemu/units.h" 345df022cfSPeter Maydell #include "qemu/memalign.h" 35c1bb86cdSEric Blake #include "trace.h" 36c1bb86cdSEric Blake #include "block/thread-pool.h" 37c1bb86cdSEric Blake #include "qemu/iov.h" 38c1bb86cdSEric Blake #include "block/raw-aio.h" 39452fcdbcSMarkus Armbruster #include "qapi/qmp/qdict.h" 40c1bb86cdSEric Blake #include "qapi/qmp/qstring.h" 41c1bb86cdSEric Blake 427c9e5276SPaolo Bonzini #include "scsi/pr-manager.h" 437c9e5276SPaolo Bonzini #include "scsi/constants.h" 447c9e5276SPaolo Bonzini 45c1bb86cdSEric Blake #if defined(__APPLE__) && (__MACH__) 4614176c8dSJoelle van Dyne #include <sys/ioctl.h> 4714176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE) 48c1bb86cdSEric Blake #include <paths.h> 49c1bb86cdSEric Blake #include <sys/param.h> 500dfc7af2SAkihiko Odaki #include <sys/mount.h> 51c1bb86cdSEric Blake #include <IOKit/IOKitLib.h> 52c1bb86cdSEric Blake #include <IOKit/IOBSD.h> 53c1bb86cdSEric Blake #include <IOKit/storage/IOMediaBSDClient.h> 54c1bb86cdSEric Blake #include <IOKit/storage/IOMedia.h> 55c1bb86cdSEric Blake #include <IOKit/storage/IOCDMedia.h> 56c1bb86cdSEric Blake //#include <IOKit/storage/IOCDTypes.h> 57c1bb86cdSEric Blake #include <IOKit/storage/IODVDMedia.h> 58c1bb86cdSEric Blake #include <CoreFoundation/CoreFoundation.h> 5914176c8dSJoelle van Dyne #endif /* defined(HAVE_HOST_BLOCK_DEVICE) */ 60c1bb86cdSEric Blake #endif 61c1bb86cdSEric Blake 62c1bb86cdSEric Blake #ifdef __sun__ 63c1bb86cdSEric Blake #define _POSIX_PTHREAD_SEMANTICS 1 
64c1bb86cdSEric Blake #include <sys/dkio.h> 65c1bb86cdSEric Blake #endif 66c1bb86cdSEric Blake #ifdef __linux__ 67c1bb86cdSEric Blake #include <sys/ioctl.h> 68c1bb86cdSEric Blake #include <sys/param.h> 691efad060SFam Zheng #include <sys/syscall.h> 705edc8557SKevin Wolf #include <sys/vfs.h> 716d43eaa3SSam Li #if defined(CONFIG_BLKZONED) 726d43eaa3SSam Li #include <linux/blkzoned.h> 736d43eaa3SSam Li #endif 74c1bb86cdSEric Blake #include <linux/cdrom.h> 75c1bb86cdSEric Blake #include <linux/fd.h> 76c1bb86cdSEric Blake #include <linux/fs.h> 77c1bb86cdSEric Blake #include <linux/hdreg.h> 785edc8557SKevin Wolf #include <linux/magic.h> 79c1bb86cdSEric Blake #include <scsi/sg.h> 80c1bb86cdSEric Blake #ifdef __s390__ 81c1bb86cdSEric Blake #include <asm/dasd.h> 82c1bb86cdSEric Blake #endif 83c1bb86cdSEric Blake #ifndef FS_NOCOW_FL 84c1bb86cdSEric Blake #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ 85c1bb86cdSEric Blake #endif 86c1bb86cdSEric Blake #endif 87c1bb86cdSEric Blake #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE) 88c1bb86cdSEric Blake #include <linux/falloc.h> 89c1bb86cdSEric Blake #endif 90c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 91c1bb86cdSEric Blake #include <sys/disk.h> 92c1bb86cdSEric Blake #include <sys/cdio.h> 93c1bb86cdSEric Blake #endif 94c1bb86cdSEric Blake 95c1bb86cdSEric Blake #ifdef __OpenBSD__ 96c1bb86cdSEric Blake #include <sys/ioctl.h> 97c1bb86cdSEric Blake #include <sys/disklabel.h> 98c1bb86cdSEric Blake #include <sys/dkio.h> 99c1bb86cdSEric Blake #endif 100c1bb86cdSEric Blake 101c1bb86cdSEric Blake #ifdef __NetBSD__ 102c1bb86cdSEric Blake #include <sys/ioctl.h> 103c1bb86cdSEric Blake #include <sys/disklabel.h> 104c1bb86cdSEric Blake #include <sys/dkio.h> 105c1bb86cdSEric Blake #include <sys/disk.h> 106c1bb86cdSEric Blake #endif 107c1bb86cdSEric Blake 108c1bb86cdSEric Blake #ifdef __DragonFly__ 109c1bb86cdSEric Blake #include <sys/ioctl.h> 110c1bb86cdSEric Blake #include 
<sys/diskslice.h>
#endif

/* OS X does not have O_DSYNC */
#ifndef O_DSYNC
#ifdef O_SYNC
#define O_DSYNC O_SYNC
#elif defined(O_FSYNC)
#define O_DSYNC O_FSYNC
#endif
#endif

/* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */
#ifndef O_DIRECT
#define O_DIRECT O_DSYNC
#endif

/* Values stored in BDRVRawState.type: regular file vs. CD-ROM device */
#define FTYPE_FILE 0
#define FTYPE_CD 1

/* Largest logical block size this driver expects to probe, in bytes */
#define MAX_BLOCKSIZE 4096

/* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes,
 * leaving a few more bytes for its future use. */
#define RAW_LOCK_PERM_BASE 100
#define RAW_LOCK_SHARED_BASE 200

/* Per-BlockDriverState state of the POSIX raw-file driver. */
typedef struct BDRVRawState {
    int fd;                 /* host file descriptor; -1 when not open */
    bool use_lock;          /* whether byte-range file locking is in use */
    int type;               /* FTYPE_FILE or FTYPE_CD */
    int open_flags;         /* O_* flags the fd was opened with */
    size_t buf_align;       /* memory-buffer alignment required for O_DIRECT */

    /* The current permissions. */
    uint64_t perm;
    uint64_t shared_perm;

    /* The perms bits whose corresponding bytes are already locked in
     * s->fd. */
    uint64_t locked_perm;
    uint64_t locked_shared_perm;

    /* "aio-max-batch" option value; 0 means let the AIO backend decide */
    uint64_t aio_max_batch;

    int perm_change_fd;
    int perm_change_flags;
    BDRVReopenState *reopen_state;

    bool has_discard:1;
    bool has_write_zeroes:1;
    bool use_linux_aio:1;
    bool use_linux_io_uring:1;
    int64_t *offset; /* offset of zone append operation */
    int page_cache_inconsistent; /* errno from fdatasync failure */
    bool has_fallocate;
    bool needs_alignment;
    bool force_alignment;
    bool drop_cache;            /* "drop-cache" option (defaults to on) */
    bool check_cache_dropped;   /* "x-check-cache-dropped" option */
    struct {
        uint64_t discard_nb_ok;
        uint64_t discard_nb_failed;
        uint64_t discard_bytes_ok;
    } stats;

    /* Optional persistent-reservation manager ("pr-manager" option) */
    PRManager *pr_mgr;
} BDRVRawState;

/* Option values carried across a reopen transaction. */
typedef struct BDRVRawReopenState {
    int open_flags;
    bool drop_cache;
    bool check_cache_dropped;
} BDRVRawReopenState;

/*
 * Sanity-check the file descriptor before an I/O operation.
 * Returns 0 if s->fd looks valid, -EIO otherwise.
 */
static int fd_open(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;

    /* this is just to ensure s->fd is sane (its called by io ops) */
    if (s->fd >= 0) {
        return 0;
    }
    return -EIO;
}

static int64_t raw_getlength(BlockDriverState *bs);

/*
 * Argument bundle describing one AIO request handed to the worker
 * functions.  Which union member is meaningful depends on aio_type.
 */
typedef struct RawPosixAIOData {
    BlockDriverState *bs;
    int aio_type;           /* kind of operation this request performs */
    int aio_fildes;         /* fd the operation acts on */

    off_t aio_offset;
    uint64_t aio_nbytes;

    union {
        struct {
            struct iovec *iov;
            int niov;
        } io;
        struct {
            uint64_t cmd;
            void *buf;
        } ioctl;
        struct {
            int aio_fd2;
            off_t aio_offset2;
        } copy_range;
        struct {
            PreallocMode prealloc;
            Error **errp;
        } truncate;
        struct {
            unsigned int *nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report;
        struct {
            unsigned long op;
        } zone_mgmt;
    };
} RawPosixAIOData;

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
static int cdrom_reopen(BlockDriverState *bs);
#endif

/*
 * Elide EAGAIN and EACCES details when failing to lock, as this
 * indicates that the specified file region is already locked by
 * another process, which is considered a common scenario.
 */
#define raw_lock_error_setg_errno(errp, err, fmt, ...)                  \
    do {                                                                \
        if ((err) == EAGAIN || (err) == EACCES) {                       \
            error_setg((errp), (fmt), ## __VA_ARGS__);                  \
        } else {                                                        \
            error_setg_errno((errp), (err), (fmt), ## __VA_ARGS__);     \
        }                                                               \
    } while (0)

#if defined(__NetBSD__)
/*
 * On NetBSD, rewrite the path of a block device node to the corresponding
 * "raw" character device node (basename prefixed with 'r', e.g.
 * /dev/wd0a -> /dev/rwd0a).  Paths that are not block devices are left
 * untouched.
 *
 * On success returns 0; *filename may be redirected to a static buffer,
 * so the result is not reentrant.  Returns -errno if lstat() fails.
 */
static int raw_normalize_devicepath(const char **filename, Error **errp)
{
    static char namebuf[PATH_MAX];
    const char *dp, *fname;
    struct stat sb;

    fname = *filename;
    dp = strrchr(fname, '/');
    if (lstat(fname, &sb) < 0) {
        error_setg_file_open(errp, errno, fname);
        return -errno;
    }

    /* Only block device nodes get the 'r' prefix treatment. */
    if (!S_ISBLK(sb.st_mode)) {
        return 0;
    }

    if (dp == NULL) {
        snprintf(namebuf, PATH_MAX, "r%s", fname);
    } else {
        snprintf(namebuf, PATH_MAX, "%.*s/r%s",
                 (int)(dp - fname), fname, dp + 1);
    }
    *filename = namebuf;
    warn_report("%s is a block device, using %s", fname, *filename);

    return 0;
}
#else
/* Nothing to normalize on non-NetBSD hosts. */
static int raw_normalize_devicepath(const char **filename, Error **errp)
{
    return 0;
}
285c1bb86cdSEric Blake #endif 286c1bb86cdSEric Blake 287c1bb86cdSEric Blake /* 288c1bb86cdSEric Blake * Get logical block size via ioctl. On success store it in @sector_size_p. 289c1bb86cdSEric Blake */ 290c1bb86cdSEric Blake static int probe_logical_blocksize(int fd, unsigned int *sector_size_p) 291c1bb86cdSEric Blake { 292c1bb86cdSEric Blake unsigned int sector_size; 293c1bb86cdSEric Blake bool success = false; 294700f9ce0SPeter Maydell int i; 295c1bb86cdSEric Blake 296c1bb86cdSEric Blake errno = ENOTSUP; 297700f9ce0SPeter Maydell static const unsigned long ioctl_list[] = { 298c1bb86cdSEric Blake #ifdef BLKSSZGET 299700f9ce0SPeter Maydell BLKSSZGET, 300c1bb86cdSEric Blake #endif 301c1bb86cdSEric Blake #ifdef DKIOCGETBLOCKSIZE 302700f9ce0SPeter Maydell DKIOCGETBLOCKSIZE, 303c1bb86cdSEric Blake #endif 304c1bb86cdSEric Blake #ifdef DIOCGSECTORSIZE 305700f9ce0SPeter Maydell DIOCGSECTORSIZE, 306700f9ce0SPeter Maydell #endif 307700f9ce0SPeter Maydell }; 308700f9ce0SPeter Maydell 309700f9ce0SPeter Maydell /* Try a few ioctls to get the right size */ 310700f9ce0SPeter Maydell for (i = 0; i < (int)ARRAY_SIZE(ioctl_list); i++) { 311700f9ce0SPeter Maydell if (ioctl(fd, ioctl_list[i], §or_size) >= 0) { 312c1bb86cdSEric Blake *sector_size_p = sector_size; 313c1bb86cdSEric Blake success = true; 314c1bb86cdSEric Blake } 315700f9ce0SPeter Maydell } 316c1bb86cdSEric Blake 317c1bb86cdSEric Blake return success ? 0 : -errno; 318c1bb86cdSEric Blake } 319c1bb86cdSEric Blake 320c1bb86cdSEric Blake /** 321c1bb86cdSEric Blake * Get physical block size of @fd. 322c1bb86cdSEric Blake * On success, store it in @blk_size and return 0. 323c1bb86cdSEric Blake * On failure, return -errno. 
324c1bb86cdSEric Blake */ 325c1bb86cdSEric Blake static int probe_physical_blocksize(int fd, unsigned int *blk_size) 326c1bb86cdSEric Blake { 327c1bb86cdSEric Blake #ifdef BLKPBSZGET 328c1bb86cdSEric Blake if (ioctl(fd, BLKPBSZGET, blk_size) < 0) { 329c1bb86cdSEric Blake return -errno; 330c1bb86cdSEric Blake } 331c1bb86cdSEric Blake return 0; 332c1bb86cdSEric Blake #else 333c1bb86cdSEric Blake return -ENOTSUP; 334c1bb86cdSEric Blake #endif 335c1bb86cdSEric Blake } 336c1bb86cdSEric Blake 3375edc8557SKevin Wolf /* 3385edc8557SKevin Wolf * Returns true if no alignment restrictions are necessary even for files 3395edc8557SKevin Wolf * opened with O_DIRECT. 3405edc8557SKevin Wolf * 3415edc8557SKevin Wolf * raw_probe_alignment() probes the required alignment and assume that 1 means 3425edc8557SKevin Wolf * the probing failed, so it falls back to a safe default of 4k. This can be 3435edc8557SKevin Wolf * avoided if we know that byte alignment is okay for the file. 3445edc8557SKevin Wolf */ 3455edc8557SKevin Wolf static bool dio_byte_aligned(int fd) 3465edc8557SKevin Wolf { 3475edc8557SKevin Wolf #ifdef __linux__ 3485edc8557SKevin Wolf struct statfs buf; 3495edc8557SKevin Wolf int ret; 3505edc8557SKevin Wolf 3515edc8557SKevin Wolf ret = fstatfs(fd, &buf); 3525edc8557SKevin Wolf if (ret == 0 && buf.f_type == NFS_SUPER_MAGIC) { 3535edc8557SKevin Wolf return true; 3545edc8557SKevin Wolf } 3555edc8557SKevin Wolf #endif 3565edc8557SKevin Wolf return false; 3575edc8557SKevin Wolf } 3585edc8557SKevin Wolf 3595dbd0ce1SKevin Wolf static bool raw_needs_alignment(BlockDriverState *bs) 3605dbd0ce1SKevin Wolf { 3615dbd0ce1SKevin Wolf BDRVRawState *s = bs->opaque; 3625dbd0ce1SKevin Wolf 3635dbd0ce1SKevin Wolf if ((bs->open_flags & BDRV_O_NOCACHE) != 0 && !dio_byte_aligned(s->fd)) { 3645dbd0ce1SKevin Wolf return true; 3655dbd0ce1SKevin Wolf } 3665dbd0ce1SKevin Wolf 3675dbd0ce1SKevin Wolf return s->force_alignment; 3685dbd0ce1SKevin Wolf } 3695dbd0ce1SKevin Wolf 370c1bb86cdSEric Blake /* 
Check if read is allowed with given memory buffer and length.
 *
 * This function is used to check O_DIRECT memory buffer and request alignment.
 */
static bool raw_is_io_aligned(int fd, void *buf, size_t len)
{
    ssize_t ret = pread(fd, buf, len, 0);

    if (ret >= 0) {
        return true;
    }

#ifdef __linux__
    /* The Linux kernel returns EINVAL for misaligned O_DIRECT reads.  Ignore
     * other errors (e.g. real I/O error), which could happen on a failed
     * drive, since we only care about probing alignment.
     */
    if (errno != EINVAL) {
        return true;
    }
#endif

    return false;
}

/*
 * Determine bs->bl.request_alignment and s->buf_align for an fd that may
 * have been opened with O_DIRECT.  First tries the logical-block-size and
 * XFS DIOINFO ioctls, then falls back to empirically probing with aligned
 * reads.  Sets errp if no working alignment could be found.
 */
static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    char *buf;
    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
    size_t alignments[] = {1, 512, 1024, 2048, 4096};

    /* For SCSI generic devices the alignment is not really used.
       With buffered I/O, we don't have any restrictions. */
    if (bdrv_is_sg(bs) || !s->needs_alignment) {
        bs->bl.request_alignment = 1;
        s->buf_align = 1;
        return;
    }

    bs->bl.request_alignment = 0;
    s->buf_align = 0;
    /* Let's try to use the logical blocksize for the alignment. */
    if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) {
        bs->bl.request_alignment = 0;
    }

#ifdef __linux__
    /*
     * The XFS ioctl definitions are shipped in extra packages that might
     * not always be available. Since we just need the XFS_IOC_DIOINFO ioctl
     * here, we simply use our own definition instead:
     */
    struct xfs_dioattr {
        uint32_t d_mem;
        uint32_t d_miniosz;
        uint32_t d_maxiosz;
    } da;
    if (ioctl(fd, _IOR('X', 30, struct xfs_dioattr), &da) >= 0) {
        bs->bl.request_alignment = da.d_miniosz;
        /* The kernel returns wrong information for d_mem */
        /* s->buf_align = da.d_mem; */
    }
#endif

    /*
     * If we could not get the sizes so far, we can only guess them. First try
     * to detect request alignment, since it is more likely to succeed. Then
     * try to detect buf_align, which cannot be detected in some cases (e.g.
     * Gluster). If buf_align cannot be detected, we fallback to the value of
     * request_alignment.
     */

    if (!bs->bl.request_alignment) {
        int i;
        size_t align;
        buf = qemu_memalign(max_align, max_align);
        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
            align = alignments[i];
            if (raw_is_io_aligned(fd, buf, align)) {
                /* Fallback to safe value. */
                bs->bl.request_alignment = (align != 1) ? align : max_align;
                break;
            }
        }
        qemu_vfree(buf);
    }

    if (!s->buf_align) {
        int i;
        size_t align;
        /* Probe buffer alignment by shifting the buffer start, not the
         * request size, hence the doubled allocation. */
        buf = qemu_memalign(max_align, 2 * max_align);
        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
            align = alignments[i];
            if (raw_is_io_aligned(fd, buf + align, max_align)) {
                /* Fallback to request_alignment. */
                s->buf_align = (align != 1) ? align : bs->bl.request_alignment;
                break;
            }
        }
        qemu_vfree(buf);
    }

    if (!s->buf_align || !bs->bl.request_alignment) {
        error_setg(errp, "Could not find working O_DIRECT alignment");
        error_append_hint(errp, "Try cache.direct=off\n");
    }
}

/*
 * Check whether a host device opened read-write is actually writable.
 * Returns 0 if writable (or not a block device), -errno on ioctl/stat
 * failure, -EACCES if the kernel marked the device read-only.
 */
static int check_hdev_writable(int fd)
{
#if defined(BLKROGET)
    /* Linux block devices can be configured "read-only" using blockdev(8).
     * This is independent of device node permissions and therefore open(2)
     * with O_RDWR succeeds. Actual writes fail with EPERM.
     *
     * bdrv_open() is supposed to fail if the disk is read-only. Explicitly
     * check for read-only block devices so that Linux block devices behave
     * properly.
     */
    struct stat st;
    int readonly = 0;

    if (fstat(fd, &st)) {
        return -errno;
    }

    /* Only block devices can carry the kernel read-only flag. */
    if (!S_ISBLK(st.st_mode)) {
        return 0;
    }

    if (ioctl(fd, BLKROGET, &readonly) < 0) {
        return -errno;
    }

    if (readonly) {
        return -EACCES;
    }
#endif /* defined(BLKROGET) */
    return 0;
}

/*
 * Translate BDRV_O_* flags into open(2) flags, stored into *open_flags.
 * With BDRV_O_AUTO_RDONLY, the file is opened read-write only if there
 * are writers (@has_writers); BDRV_O_RDWR forces read-write.
 */
static void raw_parse_flags(int bdrv_flags, int *open_flags, bool has_writers)
{
    bool read_write = false;
    assert(open_flags != NULL);

    *open_flags |= O_BINARY;
    *open_flags &= ~O_ACCMODE;

    if (bdrv_flags & BDRV_O_AUTO_RDONLY) {
        read_write = has_writers;
    } else if (bdrv_flags & BDRV_O_RDWR) {
        read_write = true;
    }

    if (read_write) {
        *open_flags |= O_RDWR;
    } else {
        *open_flags |= O_RDONLY;
    }

    /* Use O_DSYNC for write-through caching, no flags for write-back caching,
     * and O_DIRECT for no caching. */
    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        *open_flags |= O_DIRECT;
    }
}

/* Strip the optional "file:" prefix from a filename into @options. */
static void raw_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    bdrv_parse_filename_strip_prefix(filename, "file:", options);
}

/* Runtime options accepted by the raw (file) driver. */
static QemuOptsList raw_runtime_opts = {
    .name = "raw",
    .head = QTAILQ_HEAD_INITIALIZER(raw_runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "File name of the image",
        },
        {
            .name = "aio",
            .type = QEMU_OPT_STRING,
            .help = "host AIO implementation (threads, native, io_uring)",
        },
        {
            .name = "aio-max-batch",
            .type = QEMU_OPT_NUMBER,
            .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
        },
        {
            .name = "locking",
            .type = QEMU_OPT_STRING,
            .help = "file locking mode (on/off/auto, default: auto)",
        },
        {
            .name = "pr-manager",
            .type = QEMU_OPT_STRING,
            .help = "id of
persistent reservation manager object (default: none)", 5737c9e5276SPaolo Bonzini }, 574f357fcd8SStefan Hajnoczi #if defined(__linux__) 575f357fcd8SStefan Hajnoczi { 576f357fcd8SStefan Hajnoczi .name = "drop-cache", 577f357fcd8SStefan Hajnoczi .type = QEMU_OPT_BOOL, 578f357fcd8SStefan Hajnoczi .help = "invalidate page cache during live migration (default: on)", 579f357fcd8SStefan Hajnoczi }, 580f357fcd8SStefan Hajnoczi #endif 58131be8a2aSStefan Hajnoczi { 58231be8a2aSStefan Hajnoczi .name = "x-check-cache-dropped", 58331be8a2aSStefan Hajnoczi .type = QEMU_OPT_BOOL, 58431be8a2aSStefan Hajnoczi .help = "check that page cache was dropped on live migration (default: off)" 58531be8a2aSStefan Hajnoczi }, 586c1bb86cdSEric Blake { /* end of list */ } 587c1bb86cdSEric Blake }, 588c1bb86cdSEric Blake }; 589c1bb86cdSEric Blake 5908a2ce0bcSAlberto Garcia static const char *const mutable_opts[] = { "x-check-cache-dropped", NULL }; 5918a2ce0bcSAlberto Garcia 592c1bb86cdSEric Blake static int raw_open_common(BlockDriverState *bs, QDict *options, 593230ff739SJohn Snow int bdrv_flags, int open_flags, 594230ff739SJohn Snow bool device, Error **errp) 595c1bb86cdSEric Blake { 596c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 597c1bb86cdSEric Blake QemuOpts *opts; 598c1bb86cdSEric Blake Error *local_err = NULL; 599c1bb86cdSEric Blake const char *filename = NULL; 6007c9e5276SPaolo Bonzini const char *str; 601c1bb86cdSEric Blake BlockdevAioOptions aio, aio_default; 602c1bb86cdSEric Blake int fd, ret; 603c1bb86cdSEric Blake struct stat st; 604244a5668SFam Zheng OnOffAuto locking; 605c1bb86cdSEric Blake 606c1bb86cdSEric Blake opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort); 607af175e85SMarkus Armbruster if (!qemu_opts_absorb_qdict(opts, options, errp)) { 608c1bb86cdSEric Blake ret = -EINVAL; 609c1bb86cdSEric Blake goto fail; 610c1bb86cdSEric Blake } 611c1bb86cdSEric Blake 612c1bb86cdSEric Blake filename = qemu_opt_get(opts, "filename"); 613c1bb86cdSEric Blake 
614db0754dfSFam Zheng ret = raw_normalize_devicepath(&filename, errp); 615c1bb86cdSEric Blake if (ret != 0) { 616c1bb86cdSEric Blake goto fail; 617c1bb86cdSEric Blake } 618c1bb86cdSEric Blake 619c6447510SAarushi Mehta if (bdrv_flags & BDRV_O_NATIVE_AIO) { 620c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_NATIVE; 621c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING 622c6447510SAarushi Mehta } else if (bdrv_flags & BDRV_O_IO_URING) { 623c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_IO_URING; 624c6447510SAarushi Mehta #endif 625c6447510SAarushi Mehta } else { 626c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_THREADS; 627c6447510SAarushi Mehta } 628c6447510SAarushi Mehta 629f7abe0ecSMarc-André Lureau aio = qapi_enum_parse(&BlockdevAioOptions_lookup, 630f7abe0ecSMarc-André Lureau qemu_opt_get(opts, "aio"), 63106c60b6cSMarkus Armbruster aio_default, &local_err); 632c1bb86cdSEric Blake if (local_err) { 633c1bb86cdSEric Blake error_propagate(errp, local_err); 634c1bb86cdSEric Blake ret = -EINVAL; 635c1bb86cdSEric Blake goto fail; 636c1bb86cdSEric Blake } 637c6447510SAarushi Mehta 638c1bb86cdSEric Blake s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE); 639c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING 640c6447510SAarushi Mehta s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING); 641c6447510SAarushi Mehta #endif 642c1bb86cdSEric Blake 643684960d4SStefano Garzarella s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0); 644684960d4SStefano Garzarella 645f7abe0ecSMarc-André Lureau locking = qapi_enum_parse(&OnOffAuto_lookup, 646f7abe0ecSMarc-André Lureau qemu_opt_get(opts, "locking"), 64706c60b6cSMarkus Armbruster ON_OFF_AUTO_AUTO, &local_err); 648244a5668SFam Zheng if (local_err) { 649244a5668SFam Zheng error_propagate(errp, local_err); 650244a5668SFam Zheng ret = -EINVAL; 651244a5668SFam Zheng goto fail; 652244a5668SFam Zheng } 653244a5668SFam Zheng switch (locking) { 654244a5668SFam Zheng case 
ON_OFF_AUTO_ON: 655244a5668SFam Zheng s->use_lock = true; 6562b218f5dSFam Zheng if (!qemu_has_ofd_lock()) { 657db0754dfSFam Zheng warn_report("File lock requested but OFD locking syscall is " 658db0754dfSFam Zheng "unavailable, falling back to POSIX file locks"); 659db0754dfSFam Zheng error_printf("Due to the implementation, locks can be lost " 6602b218f5dSFam Zheng "unexpectedly.\n"); 6612b218f5dSFam Zheng } 662244a5668SFam Zheng break; 663244a5668SFam Zheng case ON_OFF_AUTO_OFF: 664244a5668SFam Zheng s->use_lock = false; 665244a5668SFam Zheng break; 666244a5668SFam Zheng case ON_OFF_AUTO_AUTO: 6672b218f5dSFam Zheng s->use_lock = qemu_has_ofd_lock(); 668244a5668SFam Zheng break; 669244a5668SFam Zheng default: 670244a5668SFam Zheng abort(); 671244a5668SFam Zheng } 672244a5668SFam Zheng 6737c9e5276SPaolo Bonzini str = qemu_opt_get(opts, "pr-manager"); 6747c9e5276SPaolo Bonzini if (str) { 6757c9e5276SPaolo Bonzini s->pr_mgr = pr_manager_lookup(str, &local_err); 6767c9e5276SPaolo Bonzini if (local_err) { 6777c9e5276SPaolo Bonzini error_propagate(errp, local_err); 6787c9e5276SPaolo Bonzini ret = -EINVAL; 6797c9e5276SPaolo Bonzini goto fail; 6807c9e5276SPaolo Bonzini } 6817c9e5276SPaolo Bonzini } 6827c9e5276SPaolo Bonzini 683f357fcd8SStefan Hajnoczi s->drop_cache = qemu_opt_get_bool(opts, "drop-cache", true); 68431be8a2aSStefan Hajnoczi s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped", 68531be8a2aSStefan Hajnoczi false); 68631be8a2aSStefan Hajnoczi 687c1bb86cdSEric Blake s->open_flags = open_flags; 68823dece19SKevin Wolf raw_parse_flags(bdrv_flags, &s->open_flags, false); 689c1bb86cdSEric Blake 690c1bb86cdSEric Blake s->fd = -1; 691b18a24a9SDaniel P. Berrangé fd = qemu_open(filename, s->open_flags, errp); 69264107dc0SKevin Wolf ret = fd < 0 ? 
-errno : 0; 69364107dc0SKevin Wolf 69464107dc0SKevin Wolf if (ret < 0) { 695c1bb86cdSEric Blake if (ret == -EROFS) { 696c1bb86cdSEric Blake ret = -EACCES; 697c1bb86cdSEric Blake } 698c1bb86cdSEric Blake goto fail; 699c1bb86cdSEric Blake } 700c1bb86cdSEric Blake s->fd = fd; 701c1bb86cdSEric Blake 702bca5283bSKevin Wolf /* Check s->open_flags rather than bdrv_flags due to auto-read-only */ 703bca5283bSKevin Wolf if (s->open_flags & O_RDWR) { 704bca5283bSKevin Wolf ret = check_hdev_writable(s->fd); 705bca5283bSKevin Wolf if (ret < 0) { 706bca5283bSKevin Wolf error_setg_errno(errp, -ret, "The device is not writable"); 707bca5283bSKevin Wolf goto fail; 708bca5283bSKevin Wolf } 709bca5283bSKevin Wolf } 710bca5283bSKevin Wolf 711244a5668SFam Zheng s->perm = 0; 712244a5668SFam Zheng s->shared_perm = BLK_PERM_ALL; 713244a5668SFam Zheng 714c1bb86cdSEric Blake #ifdef CONFIG_LINUX_AIO 715c1bb86cdSEric Blake /* Currently Linux does AIO only for files opened with O_DIRECT */ 716ed6e2161SNishanth Aravamudan if (s->use_linux_aio) { 717ed6e2161SNishanth Aravamudan if (!(s->open_flags & O_DIRECT)) { 718c1bb86cdSEric Blake error_setg(errp, "aio=native was specified, but it requires " 719c1bb86cdSEric Blake "cache.direct=on, which was not specified."); 720c1bb86cdSEric Blake ret = -EINVAL; 721c1bb86cdSEric Blake goto fail; 722c1bb86cdSEric Blake } 723ed6e2161SNishanth Aravamudan if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) { 724ed6e2161SNishanth Aravamudan error_prepend(errp, "Unable to use native AIO: "); 725ed6e2161SNishanth Aravamudan goto fail; 726ed6e2161SNishanth Aravamudan } 727ed6e2161SNishanth Aravamudan } 728c1bb86cdSEric Blake #else 729c1bb86cdSEric Blake if (s->use_linux_aio) { 730c1bb86cdSEric Blake error_setg(errp, "aio=native was specified, but is not supported " 731c1bb86cdSEric Blake "in this build."); 732c1bb86cdSEric Blake ret = -EINVAL; 733c1bb86cdSEric Blake goto fail; 734c1bb86cdSEric Blake } 735c1bb86cdSEric Blake #endif /* 
!defined(CONFIG_LINUX_AIO) */ 736c1bb86cdSEric Blake 737c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING 738c6447510SAarushi Mehta if (s->use_linux_io_uring) { 739c6447510SAarushi Mehta if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) { 740c6447510SAarushi Mehta error_prepend(errp, "Unable to use io_uring: "); 741c6447510SAarushi Mehta goto fail; 742c6447510SAarushi Mehta } 743c6447510SAarushi Mehta } 744c6447510SAarushi Mehta #else 745c6447510SAarushi Mehta if (s->use_linux_io_uring) { 746c6447510SAarushi Mehta error_setg(errp, "aio=io_uring was specified, but is not supported " 747c6447510SAarushi Mehta "in this build."); 748c6447510SAarushi Mehta ret = -EINVAL; 749c6447510SAarushi Mehta goto fail; 750c6447510SAarushi Mehta } 751c6447510SAarushi Mehta #endif /* !defined(CONFIG_LINUX_IO_URING) */ 752c6447510SAarushi Mehta 753c1bb86cdSEric Blake s->has_discard = true; 754c1bb86cdSEric Blake s->has_write_zeroes = true; 755c1bb86cdSEric Blake 756c1bb86cdSEric Blake if (fstat(s->fd, &st) < 0) { 757c1bb86cdSEric Blake ret = -errno; 758c1bb86cdSEric Blake error_setg_errno(errp, errno, "Could not stat file"); 759c1bb86cdSEric Blake goto fail; 760c1bb86cdSEric Blake } 761230ff739SJohn Snow 762230ff739SJohn Snow if (!device) { 7638d17adf3SDaniel P. Berrangé if (!S_ISREG(st.st_mode)) { 7648d17adf3SDaniel P. Berrangé error_setg(errp, "'%s' driver requires '%s' to be a regular file", 7658d17adf3SDaniel P. Berrangé bs->drv->format_name, bs->filename); 766230ff739SJohn Snow ret = -EINVAL; 767230ff739SJohn Snow goto fail; 768230ff739SJohn Snow } else { 769c1bb86cdSEric Blake s->has_fallocate = true; 770c1bb86cdSEric Blake } 771230ff739SJohn Snow } else { 772230ff739SJohn Snow if (!(S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) { 7738d17adf3SDaniel P. Berrangé error_setg(errp, "'%s' driver requires '%s' to be either " 7748d17adf3SDaniel P. Berrangé "a character or block device", 7758d17adf3SDaniel P. 
Berrangé bs->drv->format_name, bs->filename); 776230ff739SJohn Snow ret = -EINVAL; 777230ff739SJohn Snow goto fail; 778230ff739SJohn Snow } 779230ff739SJohn Snow } 780774c726cSSam Li #ifdef CONFIG_BLKZONED 781774c726cSSam Li /* 782774c726cSSam Li * The kernel page cache does not reliably work for writes to SWR zones 783774c726cSSam Li * of zoned block device because it can not guarantee the order of writes. 784774c726cSSam Li */ 785774c726cSSam Li if ((bs->bl.zoned != BLK_Z_NONE) && 786774c726cSSam Li (!(s->open_flags & O_DIRECT))) { 787774c726cSSam Li error_setg(errp, "The driver supports zoned devices, and it requires " 788774c726cSSam Li "cache.direct=on, which was not specified."); 789774c726cSSam Li return -EINVAL; /* No host kernel page cache */ 790774c726cSSam Li } 791774c726cSSam Li #endif 792230ff739SJohn Snow 793c1bb86cdSEric Blake if (S_ISBLK(st.st_mode)) { 794c1bb86cdSEric Blake #ifdef __linux__ 795c1bb86cdSEric Blake /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do 796c1bb86cdSEric Blake * not rely on the contents of discarded blocks unless using O_DIRECT. 797c1bb86cdSEric Blake * Same for BLKZEROOUT. 798c1bb86cdSEric Blake */ 799c1bb86cdSEric Blake if (!(bs->open_flags & BDRV_O_NOCACHE)) { 800c1bb86cdSEric Blake s->has_write_zeroes = false; 801c1bb86cdSEric Blake } 802c1bb86cdSEric Blake #endif 803c1bb86cdSEric Blake } 804c1bb86cdSEric Blake #ifdef __FreeBSD__ 805c1bb86cdSEric Blake if (S_ISCHR(st.st_mode)) { 806c1bb86cdSEric Blake /* 807c1bb86cdSEric Blake * The file is a char device (disk), which on FreeBSD isn't behind 808c1bb86cdSEric Blake * a pager, so force all requests to be aligned. This is needed 809c1bb86cdSEric Blake * so QEMU makes sure all IO operations on the device are aligned 810c1bb86cdSEric Blake * to sector size, or else FreeBSD will reject them with EINVAL. 
         */
        s->force_alignment = true;
    }
#endif
    s->needs_alignment = raw_needs_alignment(bs);

    bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
    if (S_ISREG(st.st_mode)) {
        /* When extending regular files, we get zeros from the OS */
        bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
    }
    ret = 0;
fail:
    /*
     * Common exit path: on failure, close the fd if it was opened, remove a
     * temporary image file, and always release the option group.
     */
    if (ret < 0 && s->fd != -1) {
        qemu_close(s->fd);
    }
    if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) {
        unlink(filename);
    }
    qemu_opts_del(opts);
    return ret;
}

/*
 * Open a regular file with the "file" protocol driver.  Delegates to
 * raw_open_common() with no extra open flags and device == false, so the
 * target must be a regular file (see the S_ISREG check there).
 */
static int raw_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVRawState *s = bs->opaque;

    s->type = FTYPE_FILE;
    return raw_open_common(bs, options, flags, 0, false, errp);
}

/* Phases of the two-step (prepare/commit-or-abort) permission lock update. */
typedef enum {
    RAW_PL_PREPARE,
    RAW_PL_COMMIT,
    RAW_PL_ABORT,
} RawPermLockOp;

/* Iterate i over every bit position used by the BLK_PERM_* mask. */
#define PERM_FOREACH(i) \
    for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++)

/* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the
 * file; if @unlock == true, also unlock the unneeded bytes.
 * @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
 */
static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
                                uint64_t perm_lock_bits,
                                uint64_t shared_perm_lock_bits,
                                bool unlock, Error **errp)
{
    int ret;
    int i;
    uint64_t locked_perm, locked_shared_perm;

    /*
     * @s may be NULL when locking a file that has no BDRVRawState attached
     * (e.g. an fd not owned by this driver instance); in that case no
     * cached lock state is available or updated.
     */
    if (s) {
        locked_perm = s->locked_perm;
        locked_shared_perm = s->locked_shared_perm;
    } else {
        /*
         * We don't have the previous bits, just lock/unlock for each of the
         * requested bits.
         */
        if (unlock) {
            locked_perm = BLK_PERM_ALL;
            locked_shared_perm = BLK_PERM_ALL;
        } else {
            locked_perm = 0;
            locked_shared_perm = 0;
        }
    }

    /* First pass: bytes representing permissions we hold ourselves. */
    PERM_FOREACH(i) {
        int off = RAW_LOCK_PERM_BASE + i;
        uint64_t bit = (1ULL << i);
        if ((perm_lock_bits & bit) && !(locked_perm & bit)) {
            ret = qemu_lock_fd(fd, off, 1, false);
            if (ret) {
                raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
                                          off);
                return ret;
            } else if (s) {
                s->locked_perm |= bit;
            }
        } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) {
            ret = qemu_unlock_fd(fd, off, 1);
            if (ret) {
                error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
                return ret;
            } else if (s) {
                s->locked_perm &= ~bit;
            }
        }
    }
    /* Second pass: bytes representing permissions we do NOT share. */
    PERM_FOREACH(i) {
        int off = RAW_LOCK_SHARED_BASE + i;
        uint64_t bit = (1ULL << i);
        if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) {
            ret = qemu_lock_fd(fd, off, 1, false);
            if (ret) {
                raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
                                          off);
                return ret;
            } else if (s) {
                s->locked_shared_perm |= bit;
            }
        } else if (unlock && (locked_shared_perm & bit) &&
                   !(shared_perm_lock_bits & bit)) {
            ret = qemu_unlock_fd(fd, off, 1);
            if (ret) {
                error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
                return ret;
            } else if (s) {
                s->locked_shared_perm &= ~bit;
            }
        }
    }
    return 0;
}

/* Check "unshared" bytes implied by @perm and ~@shared_perm in the file.
 */
static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
                                Error **errp)
{
    int ret;
    int i;

    /*
     * For each permission we want, probe the byte that another process
     * would hold locked if it did NOT share that permission with us.
     */
    PERM_FOREACH(i) {
        int off = RAW_LOCK_SHARED_BASE + i;
        uint64_t p = 1ULL << i;
        if (perm & p) {
            ret = qemu_lock_fd_test(fd, off, 1, true);
            if (ret) {
                char *perm_name = bdrv_perm_names(p);

                raw_lock_error_setg_errno(errp, -ret,
                                          "Failed to get \"%s\" lock",
                                          perm_name);
                g_free(perm_name);
                return ret;
            }
        }
    }
    /*
     * For each permission we refuse to share, probe the byte another
     * process would hold locked if it used that permission.
     */
    PERM_FOREACH(i) {
        int off = RAW_LOCK_PERM_BASE + i;
        uint64_t p = 1ULL << i;
        if (!(shared_perm & p)) {
            ret = qemu_lock_fd_test(fd, off, 1, true);
            if (ret) {
                char *perm_name = bdrv_perm_names(p);

                raw_lock_error_setg_errno(errp, -ret,
                                          "Failed to get shared \"%s\" lock",
                                          perm_name);
                g_free(perm_name);
                return ret;
            }
        }
    }
    return 0;
}

/*
 * Drive the PREPARE/COMMIT/ABORT stages of a permission change on the image
 * lock bytes.  PREPARE takes the union of old and new locks and verifies no
 * conflicting holder exists; COMMIT/ABORT drop the bytes that are no longer
 * needed.  Returns 0 if locking is disabled or the node is inactive.
 */
static int raw_handle_perm_lock(BlockDriverState *bs,
                                RawPermLockOp op,
                                uint64_t new_perm, uint64_t new_shared,
                                Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret = 0;
    Error *local_err = NULL;

    if (!s->use_lock) {
        return 0;
    }

    if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) {
        return 0;
    }

    switch (op) {
    case RAW_PL_PREPARE:
        if ((s->perm | new_perm) == s->perm &&
            (s->shared_perm & new_shared) == s->shared_perm)
        {
            /*
             * We are going to unlock bytes, it should not fail. If it fail due
             * to some fs-dependent permission-unrelated reasons (which occurs
             * sometimes on NFS and leads to abort in bdrv_replace_child) we
             * can't prevent such errors by any check here. And we ignore them
             * anyway in ABORT and COMMIT.
             */
            return 0;
        }
        ret = raw_apply_lock_bytes(s, s->fd, s->perm | new_perm,
                                   ~s->shared_perm | ~new_shared,
                                   false, errp);
        if (!ret) {
            ret = raw_check_lock_bytes(s->fd, new_perm, new_shared, errp);
            if (!ret) {
                return 0;
            }
            error_append_hint(errp,
                              "Is another process using the image [%s]?\n",
                              bs->filename);
        }
        /* fall through to unlock bytes. */
    case RAW_PL_ABORT:
        raw_apply_lock_bytes(s, s->fd, s->perm, ~s->shared_perm,
                             true, &local_err);
        if (local_err) {
            /* Theoretically the above call only unlocks bytes and it cannot
             * fail. Something weird happened, report it.
             */
            warn_report_err(local_err);
        }
        break;
    case RAW_PL_COMMIT:
        raw_apply_lock_bytes(s, s->fd, new_perm, ~new_shared,
                             true, &local_err);
        if (local_err) {
            /* Theoretically the above call only unlocks bytes and it cannot
             * fail. Something weird happened, report it.
             */
            warn_report_err(local_err);
        }
        break;
    }
    return ret;
}

/* Sets a specific flag (OR-ed into the current F_GETFL flags) */
static int fcntl_setfl(int fd, int flag)
{
    int flags;

    flags = fcntl(fd, F_GETFL);
    if (flags == -1) {
        return -errno;
    }
    if (fcntl(fd, F_SETFL, flags | flag) == -1) {
        return -errno;
    }
    return 0;
}

/*
 * Return an fd whose open flags match what @flags/@perm require: reuse s->fd
 * when possible, otherwise dup+fcntl or reopen the file.  Returns -1 with
 * @errp set on failure.
 */
static int raw_reconfigure_getfd(BlockDriverState *bs, int flags,
                                 int *open_flags, uint64_t perm, bool force_dup,
                                 Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int fd = -1;
    int ret;
    /* Writers need O_RDWR; this decides how raw_parse_flags opens the file. */
    bool has_writers = perm &
        (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_RESIZE);
    /* Flags that fcntl(F_SETFL) can change on an already-open fd. */
    int fcntl_flags = O_APPEND | O_NONBLOCK;
#ifdef O_NOATIME
    fcntl_flags |= O_NOATIME;
#endif

    *open_flags = 0;
    if (s->type == FTYPE_CD) {
        *open_flags |= O_NONBLOCK;
    }

    raw_parse_flags(flags, open_flags, has_writers);

#ifdef O_ASYNC
    /* Not all operating systems have O_ASYNC, and those that don't
     * will not let us track the state into rs->open_flags (typically
     * you achieve the same effect with an ioctl, for example I_SETSIG
     * on Solaris). But we do not use O_ASYNC, so that's fine.
     */
    assert((s->open_flags & O_ASYNC) == 0);
#endif

    if (!force_dup && *open_flags == s->open_flags) {
        /* We're lucky, the existing fd is fine */
        return s->fd;
    }

    /* Only fcntl-changeable bits differ: a dup + F_SETFL is enough. */
    if ((*open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) {
        /* dup the original fd */
        fd = qemu_dup(s->fd);
        if (fd >= 0) {
            ret = fcntl_setfl(fd, *open_flags);
            if (ret) {
                qemu_close(fd);
                fd = -1;
            }
        }
    }

    /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */
    if (fd == -1) {
        const char *normalized_filename = bs->filename;
        ret = raw_normalize_devicepath(&normalized_filename, errp);
        if (ret >= 0) {
            fd = qemu_open(normalized_filename, *open_flags, errp);
            if (fd == -1) {
                return -1;
            }
        }
    }

    /* Mirror the writability check done at open time for the new fd. */
    if (fd != -1 && (*open_flags & O_RDWR)) {
        ret = check_hdev_writable(fd);
        if (ret < 0) {
            qemu_close(fd);
            error_setg_errno(errp, -ret, "The device is not writable");
            return -1;
        }
    }

    return fd;
}

/*
 * .bdrv_reopen_prepare: absorb option changes into a BDRVRawReopenState.
 * The fd itself is reconfigured later, during the permission-update stage
 * (see the comment below and raw_check_perm()).
 */
static int raw_reopen_prepare(BDRVReopenState *state,
                              BlockReopenQueue *queue, Error **errp)
{
    BDRVRawState *s;
    BDRVRawReopenState *rs;
    QemuOpts *opts;
    int ret;

    assert(state != NULL);
    assert(state->bs != NULL);

    s = state->bs->opaque;

    state->opaque = g_new0(BDRVRawReopenState, 1);
    rs = state->opaque;

    /* Handle options changes */
    opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, state->options, errp)) {
        ret = -EINVAL;
        goto out;
    }

    rs->drop_cache = qemu_opt_get_bool_del(opts, "drop-cache", true);
    rs->check_cache_dropped =
        qemu_opt_get_bool_del(opts, "x-check-cache-dropped", false);

    /* This driver's reopen function doesn't currently allow changing
     * other options, so let's put them back in the original QDict and
     * bdrv_reopen_prepare() will detect changes and complain. */
    qemu_opts_to_qdict(opts, state->options);

    /*
     * As part of reopen prepare we also want to create new fd by
     * raw_reconfigure_getfd(). But it wants updated "perm", when in
     * bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to
     * permission update. Happily, permission update is always a part (a
     * separate stage) of bdrv_reopen_multiple() so we can rely on this fact
     * and reconfigure fd in raw_check_perm().
     */

    s->reopen_state = state;
    ret = 0;

out:
    qemu_opts_del(opts);
    return ret;
}

/* .bdrv_reopen_commit: apply the staged options and free the staging state. */
static void raw_reopen_commit(BDRVReopenState *state)
{
    BDRVRawReopenState *rs = state->opaque;
    BDRVRawState *s = state->bs->opaque;

    s->drop_cache = rs->drop_cache;
    s->check_cache_dropped = rs->check_cache_dropped;
    s->open_flags = rs->open_flags;
    g_free(state->opaque);
    state->opaque = NULL;

    assert(s->reopen_state == state);
    s->reopen_state = NULL;
}


/* .bdrv_reopen_abort: discard the staged state without applying it. */
static void raw_reopen_abort(BDRVReopenState *state)
{
    BDRVRawReopenState *rs = state->opaque;
    BDRVRawState *s = state->bs->opaque;

    /* nothing to do if NULL, we didn't get far enough */
    if (rs == NULL) {
        return;
    }

    g_free(state->opaque);
    state->opaque = NULL;

    assert(s->reopen_state == state);
    s->reopen_state = NULL;
}

/*
 * Query the maximum single-request transfer size in bytes via BLKSECTGET.
 * Block devices report 512-byte sectors; character (SG) devices report
 * bytes directly.  Returns a negative errno value on failure.
 */
static int hdev_get_max_hw_transfer(int fd, struct stat *st)
{
#ifdef BLKSECTGET
121118473467SPaolo Bonzini if (S_ISBLK(st->st_mode)) { 121218473467SPaolo Bonzini unsigned short max_sectors = 0; 121318473467SPaolo Bonzini if (ioctl(fd, BLKSECTGET, &max_sectors) == 0) { 121418473467SPaolo Bonzini return max_sectors * 512; 121518473467SPaolo Bonzini } 121618473467SPaolo Bonzini } else { 121748265250SEric Farman int max_bytes = 0; 1218867eccfeSMaxim Levitsky if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) { 121948265250SEric Farman return max_bytes; 1220c1bb86cdSEric Blake } 122118473467SPaolo Bonzini } 122218473467SPaolo Bonzini return -errno; 1223c1bb86cdSEric Blake #else 1224c1bb86cdSEric Blake return -ENOSYS; 1225c1bb86cdSEric Blake #endif 1226c1bb86cdSEric Blake } 1227c1bb86cdSEric Blake 1228a735b56eSSam Li /* 1229a735b56eSSam Li * Get a sysfs attribute value as character string. 1230a735b56eSSam Li */ 1231a735b56eSSam Li #ifdef CONFIG_LINUX 1232a735b56eSSam Li static int get_sysfs_str_val(struct stat *st, const char *attribute, 1233a735b56eSSam Li char **val) { 1234a735b56eSSam Li g_autofree char *sysfspath = NULL; 1235a735b56eSSam Li size_t len; 1236a735b56eSSam Li 1237a735b56eSSam Li if (!S_ISBLK(st->st_mode)) { 1238a735b56eSSam Li return -ENOTSUP; 1239a735b56eSSam Li } 1240a735b56eSSam Li 1241a735b56eSSam Li sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/%s", 1242a735b56eSSam Li major(st->st_rdev), minor(st->st_rdev), 1243a735b56eSSam Li attribute); 124429a242e1SSam Li if (!g_file_get_contents(sysfspath, val, &len, NULL)) { 1245a735b56eSSam Li return -ENOENT; 1246a735b56eSSam Li } 1247a735b56eSSam Li 1248a735b56eSSam Li /* The file is ended with '\n' */ 1249a735b56eSSam Li char *p; 1250a735b56eSSam Li p = *val; 1251a735b56eSSam Li if (*(p + len - 1) == '\n') { 1252a735b56eSSam Li *(p + len - 1) = '\0'; 1253a735b56eSSam Li } 125429a242e1SSam Li return 0; 1255a735b56eSSam Li } 1256a735b56eSSam Li #endif 1257a735b56eSSam Li 12586d43eaa3SSam Li #if defined(CONFIG_BLKZONED) 1259a735b56eSSam Li static int get_sysfs_zoned_model(struct stat 
*st, BlockZoneModel *zoned) 1260a735b56eSSam Li { 1261a735b56eSSam Li g_autofree char *val = NULL; 1262a735b56eSSam Li int ret; 1263a735b56eSSam Li 1264a735b56eSSam Li ret = get_sysfs_str_val(st, "zoned", &val); 1265a735b56eSSam Li if (ret < 0) { 1266a735b56eSSam Li return ret; 1267a735b56eSSam Li } 1268a735b56eSSam Li 1269a735b56eSSam Li if (strcmp(val, "host-managed") == 0) { 1270a735b56eSSam Li *zoned = BLK_Z_HM; 1271a735b56eSSam Li } else if (strcmp(val, "host-aware") == 0) { 1272a735b56eSSam Li *zoned = BLK_Z_HA; 1273a735b56eSSam Li } else if (strcmp(val, "none") == 0) { 1274a735b56eSSam Li *zoned = BLK_Z_NONE; 1275a735b56eSSam Li } else { 1276a735b56eSSam Li return -ENOTSUP; 1277a735b56eSSam Li } 1278a735b56eSSam Li return 0; 1279a735b56eSSam Li } 12806d43eaa3SSam Li #endif /* defined(CONFIG_BLKZONED) */ 1281a735b56eSSam Li 1282a735b56eSSam Li /* 1283a735b56eSSam Li * Get a sysfs attribute value as a long integer. 1284a735b56eSSam Li */ 1285a735b56eSSam Li #ifdef CONFIG_LINUX 1286a735b56eSSam Li static long get_sysfs_long_val(struct stat *st, const char *attribute) 1287a735b56eSSam Li { 1288a735b56eSSam Li g_autofree char *str = NULL; 1289a735b56eSSam Li const char *end; 1290a735b56eSSam Li long val; 1291a735b56eSSam Li int ret; 1292a735b56eSSam Li 1293a735b56eSSam Li ret = get_sysfs_str_val(st, attribute, &str); 1294a735b56eSSam Li if (ret < 0) { 1295a735b56eSSam Li return ret; 1296a735b56eSSam Li } 1297a735b56eSSam Li 1298a735b56eSSam Li /* The file is ended with '\n', pass 'end' to accept that. 
*/ 1299a735b56eSSam Li ret = qemu_strtol(str, &end, 10, &val); 1300a735b56eSSam Li if (ret == 0 && end && *end == '\0') { 1301a735b56eSSam Li ret = val; 1302a735b56eSSam Li } 1303a735b56eSSam Li return ret; 1304a735b56eSSam Li } 1305a735b56eSSam Li #endif 1306a735b56eSSam Li 130718473467SPaolo Bonzini static int hdev_get_max_segments(int fd, struct stat *st) 13089103f1ceSFam Zheng { 13099103f1ceSFam Zheng #ifdef CONFIG_LINUX 13109103f1ceSFam Zheng int ret; 1311867eccfeSMaxim Levitsky 131218473467SPaolo Bonzini if (S_ISCHR(st->st_mode)) { 13138ad5ab61SPaolo Bonzini if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) { 13148ad5ab61SPaolo Bonzini return ret; 13158ad5ab61SPaolo Bonzini } 13168ad5ab61SPaolo Bonzini return -ENOTSUP; 13178ad5ab61SPaolo Bonzini } 1318a735b56eSSam Li return get_sysfs_long_val(st, "max_segments"); 13199103f1ceSFam Zheng #else 13209103f1ceSFam Zheng return -ENOTSUP; 13219103f1ceSFam Zheng #endif 13229103f1ceSFam Zheng } 13239103f1ceSFam Zheng 13246d43eaa3SSam Li #if defined(CONFIG_BLKZONED) 1325a3c41f06SSam Li /* 1326a3c41f06SSam Li * If the reset_all flag is true, then the wps of zone whose state is 1327a3c41f06SSam Li * not readonly or offline should be all reset to the start sector. 1328a3c41f06SSam Li * Else, take the real wp of the device. 
1329a3c41f06SSam Li */ 1330a3c41f06SSam Li static int get_zones_wp(BlockDriverState *bs, int fd, int64_t offset, 1331a3c41f06SSam Li unsigned int nrz, bool reset_all) 1332a3c41f06SSam Li { 1333a3c41f06SSam Li struct blk_zone *blkz; 1334a3c41f06SSam Li size_t rep_size; 1335a3c41f06SSam Li uint64_t sector = offset >> BDRV_SECTOR_BITS; 1336a3c41f06SSam Li BlockZoneWps *wps = bs->wps; 1337a3c41f06SSam Li unsigned int j = offset / bs->bl.zone_size; 1338a3c41f06SSam Li unsigned int n = 0, i = 0; 1339a3c41f06SSam Li int ret; 1340a3c41f06SSam Li rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone); 1341a3c41f06SSam Li g_autofree struct blk_zone_report *rep = NULL; 1342a3c41f06SSam Li 1343a3c41f06SSam Li rep = g_malloc(rep_size); 1344a3c41f06SSam Li blkz = (struct blk_zone *)(rep + 1); 1345a3c41f06SSam Li while (n < nrz) { 1346a3c41f06SSam Li memset(rep, 0, rep_size); 1347a3c41f06SSam Li rep->sector = sector; 1348a3c41f06SSam Li rep->nr_zones = nrz - n; 1349a3c41f06SSam Li 1350a3c41f06SSam Li do { 1351a3c41f06SSam Li ret = ioctl(fd, BLKREPORTZONE, rep); 1352a3c41f06SSam Li } while (ret != 0 && errno == EINTR); 1353a3c41f06SSam Li if (ret != 0) { 1354a3c41f06SSam Li error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d", 1355a3c41f06SSam Li fd, offset, errno); 1356a3c41f06SSam Li return -errno; 1357a3c41f06SSam Li } 1358a3c41f06SSam Li 1359a3c41f06SSam Li if (!rep->nr_zones) { 1360a3c41f06SSam Li break; 1361a3c41f06SSam Li } 1362a3c41f06SSam Li 1363a3c41f06SSam Li for (i = 0; i < rep->nr_zones; ++i, ++n, ++j) { 1364a3c41f06SSam Li /* 1365a3c41f06SSam Li * The wp tracking cares only about sequential writes required and 1366a3c41f06SSam Li * sequential write preferred zones so that the wp can advance to 1367a3c41f06SSam Li * the right location. 1368a3c41f06SSam Li * Use the most significant bit of the wp location to indicate the 1369a3c41f06SSam Li * zone type: 0 for SWR/SWP zones and 1 for conventional zones. 
1370a3c41f06SSam Li */ 1371a3c41f06SSam Li if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) { 1372a3c41f06SSam Li wps->wp[j] |= 1ULL << 63; 1373a3c41f06SSam Li } else { 1374a3c41f06SSam Li switch(blkz[i].cond) { 1375a3c41f06SSam Li case BLK_ZONE_COND_FULL: 1376a3c41f06SSam Li case BLK_ZONE_COND_READONLY: 1377a3c41f06SSam Li /* Zone not writable */ 1378a3c41f06SSam Li wps->wp[j] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS; 1379a3c41f06SSam Li break; 1380a3c41f06SSam Li case BLK_ZONE_COND_OFFLINE: 1381a3c41f06SSam Li /* Zone not writable nor readable */ 1382a3c41f06SSam Li wps->wp[j] = (blkz[i].start) << BDRV_SECTOR_BITS; 1383a3c41f06SSam Li break; 1384a3c41f06SSam Li default: 1385a3c41f06SSam Li if (reset_all) { 1386a3c41f06SSam Li wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS; 1387a3c41f06SSam Li } else { 1388a3c41f06SSam Li wps->wp[j] = blkz[i].wp << BDRV_SECTOR_BITS; 1389a3c41f06SSam Li } 1390a3c41f06SSam Li break; 1391a3c41f06SSam Li } 1392a3c41f06SSam Li } 1393a3c41f06SSam Li } 1394a3c41f06SSam Li sector = blkz[i - 1].start + blkz[i - 1].len; 1395a3c41f06SSam Li } 1396a3c41f06SSam Li 1397a3c41f06SSam Li return 0; 1398a3c41f06SSam Li } 1399a3c41f06SSam Li 1400a3c41f06SSam Li static void update_zones_wp(BlockDriverState *bs, int fd, int64_t offset, 1401a3c41f06SSam Li unsigned int nrz) 1402a3c41f06SSam Li { 1403a3c41f06SSam Li if (get_zones_wp(bs, fd, offset, nrz, 0) < 0) { 1404a3c41f06SSam Li error_report("update zone wp failed"); 1405a3c41f06SSam Li } 1406a3c41f06SSam Li } 1407a3c41f06SSam Li 1408a735b56eSSam Li static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, 1409a735b56eSSam Li Error **errp) 1410a735b56eSSam Li { 1411a3c41f06SSam Li BDRVRawState *s = bs->opaque; 1412a735b56eSSam Li BlockZoneModel zoned; 1413a735b56eSSam Li int ret; 1414a735b56eSSam Li 1415a735b56eSSam Li bs->bl.zoned = BLK_Z_NONE; 1416a735b56eSSam Li 1417a735b56eSSam Li ret = get_sysfs_zoned_model(st, &zoned); 1418a735b56eSSam Li if (ret < 0 || zoned == 
BLK_Z_NONE) { 1419a735b56eSSam Li return; 1420a735b56eSSam Li } 1421a735b56eSSam Li bs->bl.zoned = zoned; 14226d43eaa3SSam Li 14236d43eaa3SSam Li ret = get_sysfs_long_val(st, "max_open_zones"); 14246d43eaa3SSam Li if (ret >= 0) { 14256d43eaa3SSam Li bs->bl.max_open_zones = ret; 1426a735b56eSSam Li } 1427a735b56eSSam Li 14286d43eaa3SSam Li ret = get_sysfs_long_val(st, "max_active_zones"); 14296d43eaa3SSam Li if (ret >= 0) { 14306d43eaa3SSam Li bs->bl.max_active_zones = ret; 14316d43eaa3SSam Li } 14326d43eaa3SSam Li 14336d43eaa3SSam Li /* 14346d43eaa3SSam Li * The zoned device must at least have zone size and nr_zones fields. 14356d43eaa3SSam Li */ 14366d43eaa3SSam Li ret = get_sysfs_long_val(st, "chunk_sectors"); 14376d43eaa3SSam Li if (ret < 0) { 14386d43eaa3SSam Li error_setg_errno(errp, -ret, "Unable to read chunk_sectors " 14396d43eaa3SSam Li "sysfs attribute"); 14406d43eaa3SSam Li return; 14416d43eaa3SSam Li } else if (!ret) { 14426d43eaa3SSam Li error_setg(errp, "Read 0 from chunk_sectors sysfs attribute"); 14436d43eaa3SSam Li return; 14446d43eaa3SSam Li } 14456d43eaa3SSam Li bs->bl.zone_size = ret << BDRV_SECTOR_BITS; 14466d43eaa3SSam Li 14476d43eaa3SSam Li ret = get_sysfs_long_val(st, "nr_zones"); 14486d43eaa3SSam Li if (ret < 0) { 14496d43eaa3SSam Li error_setg_errno(errp, -ret, "Unable to read nr_zones " 14506d43eaa3SSam Li "sysfs attribute"); 14516d43eaa3SSam Li return; 14526d43eaa3SSam Li } else if (!ret) { 14536d43eaa3SSam Li error_setg(errp, "Read 0 from nr_zones sysfs attribute"); 14546d43eaa3SSam Li return; 14556d43eaa3SSam Li } 14566d43eaa3SSam Li bs->bl.nr_zones = ret; 14576d43eaa3SSam Li 14586d43eaa3SSam Li ret = get_sysfs_long_val(st, "zone_append_max_bytes"); 14596d43eaa3SSam Li if (ret > 0) { 14606d43eaa3SSam Li bs->bl.max_append_sectors = ret >> BDRV_SECTOR_BITS; 14616d43eaa3SSam Li } 1462a3c41f06SSam Li 1463a3c41f06SSam Li ret = get_sysfs_long_val(st, "physical_block_size"); 1464a3c41f06SSam Li if (ret >= 0) { 1465a3c41f06SSam Li 
#else /* !defined(CONFIG_BLKZONED) */
/* Without CONFIG_BLKZONED, every host device is treated as non-zoned. */
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
                                     Error **errp)
{
    bs->bl.zoned = BLK_Z_NONE;
}
#endif /* !defined(CONFIG_BLKZONED) */

/*
 * Refresh bs->bl (alignment, memory alignment, transfer limits, zoned
 * limits) from the state of the backing file descriptor.
 */
static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    struct stat st;

    s->needs_alignment = raw_needs_alignment(bs);
    raw_probe_alignment(bs, s->fd, errp);

    bs->bl.min_mem_alignment = s->buf_align;
    bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());

    /*
     * Maximum transfers are best effort, so it is okay to ignore any
     * errors.  That said, based on the man page errors in fstat would be
     * very much unexpected; the only possible case seems to be ENOMEM.
     */
    if (fstat(s->fd, &st)) {
        return;
    }

#if defined(__APPLE__) && (__MACH__)
    /* On Darwin, take optimal transfer/discard sizes from the filesystem */
    struct statfs buf;

    if (!fstatfs(s->fd, &buf)) {
        bs->bl.opt_transfer = buf.f_iosize;
        bs->bl.pdiscard_alignment = buf.f_bsize;
    }
#endif

    /* Hardware limits only make sense for SG nodes and block devices */
    if (bdrv_is_sg(bs) || S_ISBLK(st.st_mode)) {
        int ret = hdev_get_max_hw_transfer(s->fd, &st);

        if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) {
            bs->bl.max_hw_transfer = ret;
        }

        ret = hdev_get_max_segments(s->fd, &st);
        if (ret > 0) {
            bs->bl.max_hw_iov = ret;
        }
    }

    raw_refresh_zoned_limits(bs, &st, errp);
}
/*
 * Probe whether @fd refers to an s390 DASD device.
 * Returns the BIODASDINFO2 ioctl result (0 on a DASD), or -1 when the
 * ioctl fails or the platform has no DASD support at all.
 */
static int check_for_dasd(int fd)
{
#ifdef BIODASDINFO2
    struct dasd_information2_t info = {0};

    return ioctl(fd, BIODASDINFO2, &info);
#else
    return -1;
#endif
}
1548c1bb86cdSEric Blake */ 1549c1bb86cdSEric Blake static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz) 1550c1bb86cdSEric Blake { 1551c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 1552c1bb86cdSEric Blake int ret; 1553c1bb86cdSEric Blake 15546d43eaa3SSam Li /* If DASD or zoned devices, get blocksizes */ 1555c1bb86cdSEric Blake if (check_for_dasd(s->fd) < 0) { 15566d43eaa3SSam Li /* zoned devices are not DASD */ 15576d43eaa3SSam Li if (bs->bl.zoned == BLK_Z_NONE) { 1558c1bb86cdSEric Blake return -ENOTSUP; 1559c1bb86cdSEric Blake } 15606d43eaa3SSam Li } 1561c1bb86cdSEric Blake ret = probe_logical_blocksize(s->fd, &bsz->log); 1562c1bb86cdSEric Blake if (ret < 0) { 1563c1bb86cdSEric Blake return ret; 1564c1bb86cdSEric Blake } 1565c1bb86cdSEric Blake return probe_physical_blocksize(s->fd, &bsz->phys); 1566c1bb86cdSEric Blake } 1567c1bb86cdSEric Blake 1568c1bb86cdSEric Blake /** 1569c1bb86cdSEric Blake * Try to get @bs's geometry: cyls, heads, sectors. 1570c1bb86cdSEric Blake * On success, store them in @geo and return 0. 1571c1bb86cdSEric Blake * On failure return -errno. 1572c1bb86cdSEric Blake * (Allows block driver to assign default geometry values that guest sees) 1573c1bb86cdSEric Blake */ 1574c1bb86cdSEric Blake #ifdef __linux__ 1575c1bb86cdSEric Blake static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo) 1576c1bb86cdSEric Blake { 1577c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 1578c1bb86cdSEric Blake struct hd_geometry ioctl_geo = {0}; 1579c1bb86cdSEric Blake 1580c1bb86cdSEric Blake /* If DASD, get its geometry */ 1581c1bb86cdSEric Blake if (check_for_dasd(s->fd) < 0) { 1582c1bb86cdSEric Blake return -ENOTSUP; 1583c1bb86cdSEric Blake } 1584c1bb86cdSEric Blake if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) { 1585c1bb86cdSEric Blake return -errno; 1586c1bb86cdSEric Blake } 1587c1bb86cdSEric Blake /* HDIO_GETGEO may return success even though geo contains zeros 1588c1bb86cdSEric Blake (e.g. 
certain multipath setups) */ 1589c1bb86cdSEric Blake if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) { 1590c1bb86cdSEric Blake return -ENOTSUP; 1591c1bb86cdSEric Blake } 1592c1bb86cdSEric Blake /* Do not return a geometry for partition */ 1593c1bb86cdSEric Blake if (ioctl_geo.start != 0) { 1594c1bb86cdSEric Blake return -ENOTSUP; 1595c1bb86cdSEric Blake } 1596c1bb86cdSEric Blake geo->heads = ioctl_geo.heads; 1597c1bb86cdSEric Blake geo->sectors = ioctl_geo.sectors; 1598c1bb86cdSEric Blake geo->cylinders = ioctl_geo.cylinders; 1599c1bb86cdSEric Blake 1600c1bb86cdSEric Blake return 0; 1601c1bb86cdSEric Blake } 1602c1bb86cdSEric Blake #else /* __linux__ */ 1603c1bb86cdSEric Blake static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo) 1604c1bb86cdSEric Blake { 1605c1bb86cdSEric Blake return -ENOTSUP; 1606c1bb86cdSEric Blake } 1607c1bb86cdSEric Blake #endif 1608c1bb86cdSEric Blake 160903425671SKevin Wolf #if defined(__linux__) 161003425671SKevin Wolf static int handle_aiocb_ioctl(void *opaque) 1611c1bb86cdSEric Blake { 161203425671SKevin Wolf RawPosixAIOData *aiocb = opaque; 1613c1bb86cdSEric Blake int ret; 1614c1bb86cdSEric Blake 161537b0b24eSNikita Ivanov ret = RETRY_ON_EINTR( 161637b0b24eSNikita Ivanov ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf) 161737b0b24eSNikita Ivanov ); 1618c1bb86cdSEric Blake if (ret == -1) { 1619c1bb86cdSEric Blake return -errno; 1620c1bb86cdSEric Blake } 1621c1bb86cdSEric Blake 1622c1bb86cdSEric Blake return 0; 1623c1bb86cdSEric Blake } 162403425671SKevin Wolf #endif /* linux */ 1625c1bb86cdSEric Blake 162606dc9bd5SKevin Wolf static int handle_aiocb_flush(void *opaque) 1627c1bb86cdSEric Blake { 162806dc9bd5SKevin Wolf RawPosixAIOData *aiocb = opaque; 1629e5bcf967SKevin Wolf BDRVRawState *s = aiocb->bs->opaque; 1630c1bb86cdSEric Blake int ret; 1631c1bb86cdSEric Blake 1632e5bcf967SKevin Wolf if (s->page_cache_inconsistent) { 1633c7ddc882SDaniel P. 
/*
 * Thread-pool handler for flush requests: fdatasync() the file descriptor.
 * Returns 0 on success, -errno on failure.  Once a flush of a writeback-
 * cached file has failed, all subsequent flushes fail permanently (see the
 * comment below for why).
 */
static int handle_aiocb_flush(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    BDRVRawState *s = aiocb->bs->opaque;
    int ret;

    /* A previous flush already failed: keep failing with the saved errno */
    if (s->page_cache_inconsistent) {
        return -s->page_cache_inconsistent;
    }

    ret = qemu_fdatasync(aiocb->aio_fildes);
    if (ret == -1) {
        trace_file_flush_fdatasync_failed(errno);

        /* There is no clear definition of the semantics of a failing fsync(),
         * so we may have to assume the worst. The sad truth is that this
         * assumption is correct for Linux. Some pages are now probably marked
         * clean in the page cache even though they are inconsistent with the
         * on-disk contents. The next fdatasync() call would succeed, but no
         * further writeback attempt will be made. We can't get back to a state
         * in which we know what is on disk (we would have to rewrite
         * everything that was touched since the last fdatasync() at least), so
         * make bdrv_flush() fail permanently. Given that the behaviour isn't
         * really defined, I have little hope that other OSes are doing better.
         *
         * Obviously, this doesn't affect O_DIRECT, which bypasses the page
         * cache. */
        if ((s->open_flags & O_DIRECT) == 0) {
            s->page_cache_inconsistent = errno;
        }
        return -errno;
    }
    return 0;
}
#ifdef CONFIG_PREADV

/*
 * preadv()/pwritev() are compiled in; start optimistic.  handle_aiocb_rw()
 * latches this to false the first time the call returns ENOSYS at runtime.
 */
static bool preadv_present = true;

/* Thin wrapper so the ifdef'd fallback below shares one call site. */
static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

static bool preadv_present = false;

/* Fallback stubs when preadv()/pwritev() are unavailable at build time. */
static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif

/*
 * Issue the request's iovec directly via preadv()/pwritev(), retrying on
 * EINTR.  Writes and zone appends both go down the pwritev() path.
 * Returns the byte count or -errno.
 */
static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb)
{
    ssize_t len;

    len = RETRY_ON_EINTR(
        (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) ?
            qemu_pwritev(aiocb->aio_fildes,
                         aiocb->io.iov,
                         aiocb->io.niov,
                         aiocb->aio_offset) :
            qemu_preadv(aiocb->aio_fildes,
                        aiocb->io.iov,
                        aiocb->io.niov,
                        aiocb->aio_offset)
    );

    if (len == -1) {
        return -errno;
    }
    return len;
}
170137b0b24eSNikita Ivanov qemu_pwritev(aiocb->aio_fildes, 1702d57c44d0SKevin Wolf aiocb->io.iov, 1703d57c44d0SKevin Wolf aiocb->io.niov, 170437b0b24eSNikita Ivanov aiocb->aio_offset) : 170537b0b24eSNikita Ivanov qemu_preadv(aiocb->aio_fildes, 1706d57c44d0SKevin Wolf aiocb->io.iov, 1707d57c44d0SKevin Wolf aiocb->io.niov, 170837b0b24eSNikita Ivanov aiocb->aio_offset) 170937b0b24eSNikita Ivanov ); 1710c1bb86cdSEric Blake 1711c1bb86cdSEric Blake if (len == -1) { 1712c1bb86cdSEric Blake return -errno; 1713c1bb86cdSEric Blake } 1714c1bb86cdSEric Blake return len; 1715c1bb86cdSEric Blake } 1716c1bb86cdSEric Blake 1717c1bb86cdSEric Blake /* 1718c1bb86cdSEric Blake * Read/writes the data to/from a given linear buffer. 1719c1bb86cdSEric Blake * 1720c1bb86cdSEric Blake * Returns the number of bytes handles or -errno in case of an error. Short 1721c1bb86cdSEric Blake * reads are only returned if the end of the file is reached. 1722c1bb86cdSEric Blake */ 1723c1bb86cdSEric Blake static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf) 1724c1bb86cdSEric Blake { 1725c1bb86cdSEric Blake ssize_t offset = 0; 1726c1bb86cdSEric Blake ssize_t len; 1727c1bb86cdSEric Blake 1728c1bb86cdSEric Blake while (offset < aiocb->aio_nbytes) { 17294751d09aSSam Li if (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) { 1730c1bb86cdSEric Blake len = pwrite(aiocb->aio_fildes, 1731c1bb86cdSEric Blake (const char *)buf + offset, 1732c1bb86cdSEric Blake aiocb->aio_nbytes - offset, 1733c1bb86cdSEric Blake aiocb->aio_offset + offset); 1734c1bb86cdSEric Blake } else { 1735c1bb86cdSEric Blake len = pread(aiocb->aio_fildes, 1736c1bb86cdSEric Blake buf + offset, 1737c1bb86cdSEric Blake aiocb->aio_nbytes - offset, 1738c1bb86cdSEric Blake aiocb->aio_offset + offset); 1739c1bb86cdSEric Blake } 1740c1bb86cdSEric Blake if (len == -1 && errno == EINTR) { 1741c1bb86cdSEric Blake continue; 1742c1bb86cdSEric Blake } else if (len == -1 && errno == EINVAL && 1743c1bb86cdSEric Blake 
(aiocb->bs->open_flags & BDRV_O_NOCACHE) && 1744c1bb86cdSEric Blake !(aiocb->aio_type & QEMU_AIO_WRITE) && 1745c1bb86cdSEric Blake offset > 0) { 1746c1bb86cdSEric Blake /* O_DIRECT pread() may fail with EINVAL when offset is unaligned 1747c1bb86cdSEric Blake * after a short read. Assume that O_DIRECT short reads only occur 1748c1bb86cdSEric Blake * at EOF. Therefore this is a short read, not an I/O error. 1749c1bb86cdSEric Blake */ 1750c1bb86cdSEric Blake break; 1751c1bb86cdSEric Blake } else if (len == -1) { 1752c1bb86cdSEric Blake offset = -errno; 1753c1bb86cdSEric Blake break; 1754c1bb86cdSEric Blake } else if (len == 0) { 1755c1bb86cdSEric Blake break; 1756c1bb86cdSEric Blake } 1757c1bb86cdSEric Blake offset += len; 1758c1bb86cdSEric Blake } 1759c1bb86cdSEric Blake 1760c1bb86cdSEric Blake return offset; 1761c1bb86cdSEric Blake } 1762c1bb86cdSEric Blake 1763999e6b69SKevin Wolf static int handle_aiocb_rw(void *opaque) 1764c1bb86cdSEric Blake { 1765999e6b69SKevin Wolf RawPosixAIOData *aiocb = opaque; 1766c1bb86cdSEric Blake ssize_t nbytes; 1767c1bb86cdSEric Blake char *buf; 1768c1bb86cdSEric Blake 1769c1bb86cdSEric Blake if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) { 1770c1bb86cdSEric Blake /* 1771c1bb86cdSEric Blake * If there is just a single buffer, and it is properly aligned 1772c1bb86cdSEric Blake * we can just use plain pread/pwrite without any problems. 1773c1bb86cdSEric Blake */ 1774d57c44d0SKevin Wolf if (aiocb->io.niov == 1) { 177554c7ca1bSKevin Wolf nbytes = handle_aiocb_rw_linear(aiocb, aiocb->io.iov->iov_base); 177654c7ca1bSKevin Wolf goto out; 1777c1bb86cdSEric Blake } 1778c1bb86cdSEric Blake /* 1779c1bb86cdSEric Blake * We have more than one iovec, and all are properly aligned. 1780c1bb86cdSEric Blake * 1781c1bb86cdSEric Blake * Try preadv/pwritev first and fall back to linearizing the 1782c1bb86cdSEric Blake * buffer if it's not supported. 
/*
 * Thread-pool handler for read/write/zone-append requests.
 *
 * Aligned single-segment requests use plain pread()/pwrite(); aligned
 * multi-segment requests try preadv()/pwritev(); everything else (or a
 * runtime ENOSYS from preadv) is bounced through one aligned buffer.
 * Returns 0 on success (short reads are zero-padded), -errno on failure;
 * a short write is reported as -EINVAL.
 */
static int handle_aiocb_rw(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    ssize_t nbytes;
    char *buf;

    if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->io.niov == 1) {
            nbytes = handle_aiocb_rw_linear(aiocb, aiocb->io.iov->iov_base);
            goto out;
        }
        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes ||
                (nbytes < 0 && nbytes != -ENOSYS)) {
                goto out;
            }
            /* ENOSYS at runtime: never try the vectored path again */
            preadv_present = false;
        }

        /*
         * XXX(hch): short read/write. no easy way to handle the reminder
         * using these interfaces.  For now retry using plain
         * pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_try_blockalign(aiocb->bs, aiocb->aio_nbytes);
    if (buf == NULL) {
        nbytes = -ENOMEM;
        goto out;
    }

    if (aiocb->aio_type & QEMU_AIO_WRITE) {
        /* Gather the guest segments into the bounce buffer */
        char *p = buf;
        int i;

        for (i = 0; i < aiocb->io.niov; ++i) {
            memcpy(p, aiocb->io.iov[i].iov_base, aiocb->io.iov[i].iov_len);
            p += aiocb->io.iov[i].iov_len;
        }
        assert(p - buf == aiocb->aio_nbytes);
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (!(aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))) {
        /* Scatter the read data back into the guest segments */
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        for (i = 0; i < aiocb->io.niov && count; ++i) {
            copy = count;
            if (copy > aiocb->io.iov[i].iov_len) {
                copy = aiocb->io.iov[i].iov_len;
            }
            memcpy(aiocb->io.iov[i].iov_base, p, copy);
            assert(count >= copy);
            p += copy;
            count -= copy;
        }
        assert(count == 0);
    }
    qemu_vfree(buf);

out:
    if (nbytes == aiocb->aio_nbytes) {
        return 0;
    } else if (nbytes >= 0 && nbytes < aiocb->aio_nbytes) {
        if (aiocb->aio_type & QEMU_AIO_WRITE) {
            return -EINVAL;
        } else {
            /* Short read at EOF: pad the remainder with zeroes */
            iov_memset(aiocb->io.iov, aiocb->io.niov, nbytes,
                       0, aiocb->aio_nbytes - nbytes);
            return 0;
        }
    } else {
        assert(nbytes < 0);
        return nbytes;
    }
}
#if defined(CONFIG_FALLOCATE) || defined(BLKZEROOUT) || defined(BLKDISCARD)
/* Collapse the various "operation not supported" errno values to -ENOTSUP */
static int translate_err(int err)
{
    if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
        err == -ENOTTY) {
        err = -ENOTSUP;
    }
    return err;
}
#endif

#ifdef CONFIG_FALLOCATE
/*
 * fallocate(2) wrapper: retries on EINTR and normalizes "unsupported"
 * errors to -ENOTSUP.  Returns 0 or a negative errno value.
 */
static int do_fallocate(int fd, int mode, off_t offset, off_t len)
{
    do {
        if (fallocate(fd, mode, offset, len) == 0) {
            return 0;
        }
    } while (errno == EINTR);
    return translate_err(-errno);
}
#endif
/*
 * Zero a range of a host block device via the BLKZEROOUT ioctl.
 * Returns 0 on success, -ENOTSUP when unavailable/unsupported (latching
 * s->has_write_zeroes to false), or another -errno on failure.
 */
static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
{
    int ret = -ENOTSUP;
    BDRVRawState *s = aiocb->bs->opaque;

    if (!s->has_write_zeroes) {
        return -ENOTSUP;
    }

#ifdef BLKZEROOUT
    /* The BLKZEROOUT implementation in the kernel doesn't set
     * BLKDEV_ZERO_NOFALLBACK, so we can't call this if we have to avoid slow
     * fallbacks. */
    if (!(aiocb->aio_type & QEMU_AIO_NO_FALLBACK)) {
        do {
            uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
            if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
                return 0;
            }
        } while (errno == EINTR);

        ret = translate_err(-errno);
        if (ret == -ENOTSUP) {
            /* Don't bother trying BLKZEROOUT again for this device */
            s->has_write_zeroes = false;
        }
    }
#endif

    return ret;
}
/*
 * Thread-pool handler for write-zeroes requests (without unmapping).
 *
 * Tries, in order: BLKZEROOUT for block devices, FALLOC_FL_ZERO_RANGE,
 * punch-hole followed by re-allocation, and finally plain fallocate() when
 * merely extending the file.  Each mechanism that reports ENOTSUP is
 * latched off in BDRVRawState so it is not retried on later requests.
 * Returns 0, -ENOTSUP (caller falls back to writing zero buffers), or
 * another -errno.
 */
static int handle_aiocb_write_zeroes(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
#ifdef CONFIG_FALLOCATE
    BDRVRawState *s = aiocb->bs->opaque;
    int64_t len;
#endif

    if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
        return handle_aiocb_write_zeroes_block(aiocb);
    }

#ifdef CONFIG_FALLOCATE_ZERO_RANGE
    if (s->has_write_zeroes) {
        int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
                               aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == -ENOTSUP) {
            s->has_write_zeroes = false;
        } else if (ret == 0 || ret != -EINVAL) {
            return ret;
        }
        /*
         * Note: Some file systems do not like unaligned byte ranges, and
         * return EINVAL in such a case, though they should not do it according
         * to the man-page of fallocate(). Thus we simply ignore this return
         * value and try the other fallbacks instead.
         */
    }
#endif

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    if (s->has_discard && s->has_fallocate) {
        int ret = do_fallocate(s->fd,
                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                               aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == 0) {
            /* Hole punched; re-allocate so the range reads as zeroes */
            ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
            if (ret == 0 || ret != -ENOTSUP) {
                return ret;
            }
            s->has_fallocate = false;
        } else if (ret == -EINVAL) {
            /*
             * Some file systems like older versions of GPFS do not like un-
             * aligned byte ranges, and return EINVAL in such a case, though
             * they should not do it according to the man-page of fallocate().
             * Warn about the bad filesystem and try the final fallback instead.
             */
            warn_report_once("Your file system is misbehaving: "
                             "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. "
                             "Please report this bug to your file system "
                             "vendor.");
        } else if (ret != -ENOTSUP) {
            return ret;
        } else {
            s->has_discard = false;
        }
    }
#endif

#ifdef CONFIG_FALLOCATE
    /* Last resort: we are trying to extend the file with zeroed data. This
     * can be done via fallocate(fd, 0) */
    len = raw_getlength(aiocb->bs);
    if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
        int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == 0 || ret != -ENOTSUP) {
            return ret;
        }
        s->has_fallocate = false;
    }
#endif

    return -ENOTSUP;
}
/*
 * Thread-pool handler for write-zeroes-with-unmap requests: try to punch a
 * hole first (which both deallocates and guarantees zeroes), and fall back
 * to the plain write-zeroes path when that is not possible.
 */
static int handle_aiocb_write_zeroes_unmap(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    BDRVRawState *s G_GNUC_UNUSED = aiocb->bs->opaque;

    /* First try to write zeros and unmap at the same time */

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                           aiocb->aio_offset, aiocb->aio_nbytes);
    switch (ret) {
    case -ENOTSUP:
    case -EINVAL:
    case -EBUSY:
        /* Retryable with the non-unmapping fallback below */
        break;
    default:
        return ret;
    }
#endif

    /* If we couldn't manage to unmap while guaranteed that the area reads as
     * all-zero afterwards, just write zeroes without unmapping */
    return handle_aiocb_write_zeroes(aiocb);
}

#ifndef HAVE_COPY_FILE_RANGE
/*
 * Fallback for libcs without copy_file_range(): invoke the raw syscall
 * when the kernel exposes it, otherwise fail with ENOSYS.
 */
static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
                             off_t *out_off, size_t len, unsigned int flags)
{
#ifdef __NR_copy_file_range
    return syscall(__NR_copy_file_range, in_fd, in_off, out_fd,
                   out_off, len, flags);
#else
    errno = ENOSYS;
    return -1;
#endif
}
#endif
/*
 * parse_zone - Fill a zone descriptor
 */
#if defined(CONFIG_BLKZONED)
/*
 * Translate a kernel struct blk_zone (512-byte sector units) into QEMU's
 * BlockZoneDescriptor (byte units).  Returns 0, or -ENOTSUP for a zone
 * type/condition QEMU does not model.
 */
static inline int parse_zone(struct BlockZoneDescriptor *zone,
                             const struct blk_zone *blkz) {
    zone->start = blkz->start << BDRV_SECTOR_BITS;
    zone->length = blkz->len << BDRV_SECTOR_BITS;
    zone->wp = blkz->wp << BDRV_SECTOR_BITS;

#ifdef HAVE_BLK_ZONE_REP_CAPACITY
    zone->cap = blkz->capacity << BDRV_SECTOR_BITS;
#else
    /* Old kernels don't report capacity; assume it equals the zone length */
    zone->cap = blkz->len << BDRV_SECTOR_BITS;
#endif

    switch (blkz->type) {
    case BLK_ZONE_TYPE_SEQWRITE_REQ:
        zone->type = BLK_ZT_SWR;
        break;
    case BLK_ZONE_TYPE_SEQWRITE_PREF:
        zone->type = BLK_ZT_SWP;
        break;
    case BLK_ZONE_TYPE_CONVENTIONAL:
        zone->type = BLK_ZT_CONV;
        break;
    default:
        error_report("Unsupported zone type: 0x%x", blkz->type);
        return -ENOTSUP;
    }

    switch (blkz->cond) {
    case BLK_ZONE_COND_NOT_WP:
        zone->state = BLK_ZS_NOT_WP;
        break;
    case BLK_ZONE_COND_EMPTY:
        zone->state = BLK_ZS_EMPTY;
        break;
    case BLK_ZONE_COND_IMP_OPEN:
        zone->state = BLK_ZS_IOPEN;
        break;
    case BLK_ZONE_COND_EXP_OPEN:
        zone->state = BLK_ZS_EOPEN;
        break;
    case BLK_ZONE_COND_CLOSED:
        zone->state = BLK_ZS_CLOSED;
        break;
    case BLK_ZONE_COND_READONLY:
        zone->state = BLK_ZS_RDONLY;
        break;
    case BLK_ZONE_COND_FULL:
        zone->state = BLK_ZS_FULL;
        break;
    case BLK_ZONE_COND_OFFLINE:
        zone->state = BLK_ZS_OFFLINE;
        break;
    default:
        error_report("Unsupported zone state: 0x%x", blkz->cond);
        return -ENOTSUP;
    }
    return 0;
}
#endif
#if defined(CONFIG_BLKZONED)
/*
 * Thread-pool handler for zone-report requests: issue BLKREPORTZONE
 * repeatedly from aiocb->aio_offset until *nr_zones zones were collected
 * or the device reports no more zones, filling aiocb->zone_report.zones.
 * On return, *nr_zones holds the number of descriptors actually written.
 * Returns 0 or -errno.
 */
static int handle_aiocb_zone_report(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int fd = aiocb->aio_fildes;
    unsigned int *nr_zones = aiocb->zone_report.nr_zones;
    BlockZoneDescriptor *zones = aiocb->zone_report.zones;
    /* zoned block devices use 512-byte sectors */
    uint64_t sector = aiocb->aio_offset / 512;

    struct blk_zone *blkz;
    size_t rep_size;
    unsigned int nrz;
    int ret;
    unsigned int n = 0, i = 0;

    nrz = *nr_zones;
    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
    g_autofree struct blk_zone_report *rep = NULL;
    rep = g_malloc(rep_size);

    /* Zone entries follow the report header in memory */
    blkz = (struct blk_zone *)(rep + 1);
    while (n < nrz) {
        memset(rep, 0, rep_size);
        rep->sector = sector;
        rep->nr_zones = nrz - n;

        do {
            ret = ioctl(fd, BLKREPORTZONE, rep);
        } while (ret != 0 && errno == EINTR);
        if (ret != 0) {
            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
                         fd, sector, errno);
            return -errno;
        }

        if (!rep->nr_zones) {
            break;
        }

        for (i = 0; i < rep->nr_zones; i++, n++) {
            ret = parse_zone(&zones[n], &blkz[i]);
            if (ret != 0) {
                return ret;
            }

            /* The next report should start after the last zone reported */
            sector = blkz[i].start + blkz[i].len;
        }
    }

    *nr_zones = n;
    return 0;
}
#endif
rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone); 21096d43eaa3SSam Li g_autofree struct blk_zone_report *rep = NULL; 21106d43eaa3SSam Li rep = g_malloc(rep_size); 21116d43eaa3SSam Li 21126d43eaa3SSam Li blkz = (struct blk_zone *)(rep + 1); 21136d43eaa3SSam Li while (n < nrz) { 21146d43eaa3SSam Li memset(rep, 0, rep_size); 21156d43eaa3SSam Li rep->sector = sector; 21166d43eaa3SSam Li rep->nr_zones = nrz - n; 21176d43eaa3SSam Li 21186d43eaa3SSam Li do { 21196d43eaa3SSam Li ret = ioctl(fd, BLKREPORTZONE, rep); 21206d43eaa3SSam Li } while (ret != 0 && errno == EINTR); 21216d43eaa3SSam Li if (ret != 0) { 21226d43eaa3SSam Li error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d", 21236d43eaa3SSam Li fd, sector, errno); 21246d43eaa3SSam Li return -errno; 21256d43eaa3SSam Li } 21266d43eaa3SSam Li 21276d43eaa3SSam Li if (!rep->nr_zones) { 21286d43eaa3SSam Li break; 21296d43eaa3SSam Li } 21306d43eaa3SSam Li 21316d43eaa3SSam Li for (i = 0; i < rep->nr_zones; i++, n++) { 21326d43eaa3SSam Li ret = parse_zone(&zones[n], &blkz[i]); 21336d43eaa3SSam Li if (ret != 0) { 21346d43eaa3SSam Li return ret; 21356d43eaa3SSam Li } 21366d43eaa3SSam Li 21376d43eaa3SSam Li /* The next report should start after the last zone reported */ 21386d43eaa3SSam Li sector = blkz[i].start + blkz[i].len; 21396d43eaa3SSam Li } 21406d43eaa3SSam Li } 21416d43eaa3SSam Li 21426d43eaa3SSam Li *nr_zones = n; 21436d43eaa3SSam Li return 0; 21446d43eaa3SSam Li } 21456d43eaa3SSam Li #endif 21466d43eaa3SSam Li 21476d43eaa3SSam Li #if defined(CONFIG_BLKZONED) 21486d43eaa3SSam Li static int handle_aiocb_zone_mgmt(void *opaque) 21496d43eaa3SSam Li { 21506d43eaa3SSam Li RawPosixAIOData *aiocb = opaque; 21516d43eaa3SSam Li int fd = aiocb->aio_fildes; 21526d43eaa3SSam Li uint64_t sector = aiocb->aio_offset / 512; 21536d43eaa3SSam Li int64_t nr_sectors = aiocb->aio_nbytes / 512; 21546d43eaa3SSam Li struct blk_zone_range range; 21556d43eaa3SSam Li int ret; 21566d43eaa3SSam Li 
21576d43eaa3SSam Li /* Execute the operation */ 21586d43eaa3SSam Li range.sector = sector; 21596d43eaa3SSam Li range.nr_sectors = nr_sectors; 21606d43eaa3SSam Li do { 21616d43eaa3SSam Li ret = ioctl(fd, aiocb->zone_mgmt.op, &range); 21626d43eaa3SSam Li } while (ret != 0 && errno == EINTR); 21636d43eaa3SSam Li 2164a3c41f06SSam Li return ret < 0 ? -errno : ret; 21656d43eaa3SSam Li } 21666d43eaa3SSam Li #endif 21676d43eaa3SSam Li 216858a209c4SKevin Wolf static int handle_aiocb_copy_range(void *opaque) 21691efad060SFam Zheng { 217058a209c4SKevin Wolf RawPosixAIOData *aiocb = opaque; 21711efad060SFam Zheng uint64_t bytes = aiocb->aio_nbytes; 21721efad060SFam Zheng off_t in_off = aiocb->aio_offset; 2173d57c44d0SKevin Wolf off_t out_off = aiocb->copy_range.aio_offset2; 21741efad060SFam Zheng 21751efad060SFam Zheng while (bytes) { 21761efad060SFam Zheng ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off, 2177d57c44d0SKevin Wolf aiocb->copy_range.aio_fd2, &out_off, 21781efad060SFam Zheng bytes, 0); 2179ecc983a5SFam Zheng trace_file_copy_file_range(aiocb->bs, aiocb->aio_fildes, in_off, 2180d57c44d0SKevin Wolf aiocb->copy_range.aio_fd2, out_off, bytes, 2181d57c44d0SKevin Wolf 0, ret); 2182c436e3d0SFam Zheng if (ret == 0) { 2183c436e3d0SFam Zheng /* No progress (e.g. when beyond EOF), let the caller fall back to 2184c436e3d0SFam Zheng * buffer I/O. 
*/ 2185c436e3d0SFam Zheng return -ENOSPC; 21861efad060SFam Zheng } 21871efad060SFam Zheng if (ret < 0) { 2188c436e3d0SFam Zheng switch (errno) { 2189c436e3d0SFam Zheng case ENOSYS: 21901efad060SFam Zheng return -ENOTSUP; 2191c436e3d0SFam Zheng case EINTR: 2192c436e3d0SFam Zheng continue; 2193c436e3d0SFam Zheng default: 21941efad060SFam Zheng return -errno; 21951efad060SFam Zheng } 21961efad060SFam Zheng } 21971efad060SFam Zheng bytes -= ret; 21981efad060SFam Zheng } 21991efad060SFam Zheng return 0; 22001efad060SFam Zheng } 22011efad060SFam Zheng 220246ee0f46SKevin Wolf static int handle_aiocb_discard(void *opaque) 2203c1bb86cdSEric Blake { 220446ee0f46SKevin Wolf RawPosixAIOData *aiocb = opaque; 220513a02833SAri Sundholm int ret = -ENOTSUP; 2206c1bb86cdSEric Blake BDRVRawState *s = aiocb->bs->opaque; 2207c1bb86cdSEric Blake 2208c1bb86cdSEric Blake if (!s->has_discard) { 2209c1bb86cdSEric Blake return -ENOTSUP; 2210c1bb86cdSEric Blake } 2211c1bb86cdSEric Blake 2212c1bb86cdSEric Blake if (aiocb->aio_type & QEMU_AIO_BLKDEV) { 2213c1bb86cdSEric Blake #ifdef BLKDISCARD 2214c1bb86cdSEric Blake do { 2215c1bb86cdSEric Blake uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes }; 2216c1bb86cdSEric Blake if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) { 2217c1bb86cdSEric Blake return 0; 2218c1bb86cdSEric Blake } 2219c1bb86cdSEric Blake } while (errno == EINTR); 2220c1bb86cdSEric Blake 22210dfc7af2SAkihiko Odaki ret = translate_err(-errno); 2222c1bb86cdSEric Blake #endif 2223c1bb86cdSEric Blake } else { 2224c1bb86cdSEric Blake #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 2225c1bb86cdSEric Blake ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 2226c1bb86cdSEric Blake aiocb->aio_offset, aiocb->aio_nbytes); 222713a02833SAri Sundholm ret = translate_err(ret); 22280dfc7af2SAkihiko Odaki #elif defined(__APPLE__) && (__MACH__) 22290dfc7af2SAkihiko Odaki fpunchhole_t fpunchhole; 22300dfc7af2SAkihiko Odaki fpunchhole.fp_flags = 0; 22310dfc7af2SAkihiko Odaki 
fpunchhole.reserved = 0; 22320dfc7af2SAkihiko Odaki fpunchhole.fp_offset = aiocb->aio_offset; 22330dfc7af2SAkihiko Odaki fpunchhole.fp_length = aiocb->aio_nbytes; 22340dfc7af2SAkihiko Odaki if (fcntl(s->fd, F_PUNCHHOLE, &fpunchhole) == -1) { 22350dfc7af2SAkihiko Odaki ret = errno == ENODEV ? -ENOTSUP : -errno; 22360dfc7af2SAkihiko Odaki } else { 22370dfc7af2SAkihiko Odaki ret = 0; 22380dfc7af2SAkihiko Odaki } 2239c1bb86cdSEric Blake #endif 2240c1bb86cdSEric Blake } 2241c1bb86cdSEric Blake 2242c1bb86cdSEric Blake if (ret == -ENOTSUP) { 2243c1bb86cdSEric Blake s->has_discard = false; 2244c1bb86cdSEric Blake } 2245c1bb86cdSEric Blake return ret; 2246c1bb86cdSEric Blake } 2247c1bb86cdSEric Blake 22483a20013fSNir Soffer /* 22493a20013fSNir Soffer * Help alignment probing by allocating the first block. 22503a20013fSNir Soffer * 22513a20013fSNir Soffer * When reading with direct I/O from unallocated area on Gluster backed by XFS, 22523a20013fSNir Soffer * reading succeeds regardless of request length. In this case we fallback to 22533a20013fSNir Soffer * safe alignment which is not optimal. Allocating the first block avoids this 22543a20013fSNir Soffer * fallback. 22553a20013fSNir Soffer * 22563a20013fSNir Soffer * fd may be opened with O_DIRECT, but we don't know the buffer alignment or 22573a20013fSNir Soffer * request alignment, so we use safe values. 22583a20013fSNir Soffer * 22593a20013fSNir Soffer * Returns: 0 on success, -errno on failure. Since this is an optimization, 22603a20013fSNir Soffer * caller may ignore failures. 22613a20013fSNir Soffer */ 22623a20013fSNir Soffer static int allocate_first_block(int fd, size_t max_size) 22633a20013fSNir Soffer { 22643a20013fSNir Soffer size_t write_size = (max_size < MAX_BLOCKSIZE) 22653a20013fSNir Soffer ? 
BDRV_SECTOR_SIZE 22663a20013fSNir Soffer : MAX_BLOCKSIZE; 22678e3b0cbbSMarc-André Lureau size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size()); 22683a20013fSNir Soffer void *buf; 22693a20013fSNir Soffer ssize_t n; 22703a20013fSNir Soffer int ret; 22713a20013fSNir Soffer 22723a20013fSNir Soffer buf = qemu_memalign(max_align, write_size); 22733a20013fSNir Soffer memset(buf, 0, write_size); 22743a20013fSNir Soffer 227537b0b24eSNikita Ivanov n = RETRY_ON_EINTR(pwrite(fd, buf, write_size, 0)); 22763a20013fSNir Soffer 22773a20013fSNir Soffer ret = (n == -1) ? -errno : 0; 22783a20013fSNir Soffer 22793a20013fSNir Soffer qemu_vfree(buf); 22803a20013fSNir Soffer return ret; 22813a20013fSNir Soffer } 22823a20013fSNir Soffer 228329cb4c01SKevin Wolf static int handle_aiocb_truncate(void *opaque) 228493f4e2ffSKevin Wolf { 228529cb4c01SKevin Wolf RawPosixAIOData *aiocb = opaque; 228693f4e2ffSKevin Wolf int result = 0; 228793f4e2ffSKevin Wolf int64_t current_length = 0; 228893f4e2ffSKevin Wolf char *buf = NULL; 228993f4e2ffSKevin Wolf struct stat st; 229093f4e2ffSKevin Wolf int fd = aiocb->aio_fildes; 229193f4e2ffSKevin Wolf int64_t offset = aiocb->aio_offset; 2292d57c44d0SKevin Wolf PreallocMode prealloc = aiocb->truncate.prealloc; 2293d57c44d0SKevin Wolf Error **errp = aiocb->truncate.errp; 229493f4e2ffSKevin Wolf 229593f4e2ffSKevin Wolf if (fstat(fd, &st) < 0) { 229693f4e2ffSKevin Wolf result = -errno; 229793f4e2ffSKevin Wolf error_setg_errno(errp, -result, "Could not stat file"); 229893f4e2ffSKevin Wolf return result; 229993f4e2ffSKevin Wolf } 230093f4e2ffSKevin Wolf 230193f4e2ffSKevin Wolf current_length = st.st_size; 2302d57c44d0SKevin Wolf if (current_length > offset && prealloc != PREALLOC_MODE_OFF) { 230393f4e2ffSKevin Wolf error_setg(errp, "Cannot use preallocation for shrinking files"); 230493f4e2ffSKevin Wolf return -ENOTSUP; 230593f4e2ffSKevin Wolf } 230693f4e2ffSKevin Wolf 2307d57c44d0SKevin Wolf switch (prealloc) { 230893f4e2ffSKevin Wolf #ifdef 
CONFIG_POSIX_FALLOCATE 230993f4e2ffSKevin Wolf case PREALLOC_MODE_FALLOC: 231093f4e2ffSKevin Wolf /* 231193f4e2ffSKevin Wolf * Truncating before posix_fallocate() makes it about twice slower on 231293f4e2ffSKevin Wolf * file systems that do not support fallocate(), trying to check if a 231393f4e2ffSKevin Wolf * block is allocated before allocating it, so don't do that here. 231493f4e2ffSKevin Wolf */ 231593f4e2ffSKevin Wolf if (offset != current_length) { 231693f4e2ffSKevin Wolf result = -posix_fallocate(fd, current_length, 231793f4e2ffSKevin Wolf offset - current_length); 231893f4e2ffSKevin Wolf if (result != 0) { 231993f4e2ffSKevin Wolf /* posix_fallocate() doesn't set errno. */ 232093f4e2ffSKevin Wolf error_setg_errno(errp, -result, 232193f4e2ffSKevin Wolf "Could not preallocate new data"); 23223a20013fSNir Soffer } else if (current_length == 0) { 23233a20013fSNir Soffer /* 23243a20013fSNir Soffer * posix_fallocate() uses fallocate() if the filesystem 23253a20013fSNir Soffer * supports it, or fallback to manually writing zeroes. If 23263a20013fSNir Soffer * fallocate() was used, unaligned reads from the fallocated 23273a20013fSNir Soffer * area in raw_probe_alignment() will succeed, hence we need to 23283a20013fSNir Soffer * allocate the first block. 23293a20013fSNir Soffer * 23303a20013fSNir Soffer * Optimize future alignment probing; ignore failures. 
23313a20013fSNir Soffer */ 23323a20013fSNir Soffer allocate_first_block(fd, offset); 233393f4e2ffSKevin Wolf } 233493f4e2ffSKevin Wolf } else { 233593f4e2ffSKevin Wolf result = 0; 233693f4e2ffSKevin Wolf } 233793f4e2ffSKevin Wolf goto out; 233893f4e2ffSKevin Wolf #endif 233993f4e2ffSKevin Wolf case PREALLOC_MODE_FULL: 234093f4e2ffSKevin Wolf { 234193f4e2ffSKevin Wolf int64_t num = 0, left = offset - current_length; 234293f4e2ffSKevin Wolf off_t seek_result; 234393f4e2ffSKevin Wolf 234493f4e2ffSKevin Wolf /* 234593f4e2ffSKevin Wolf * Knowing the final size from the beginning could allow the file 234693f4e2ffSKevin Wolf * system driver to do less allocations and possibly avoid 234793f4e2ffSKevin Wolf * fragmentation of the file. 234893f4e2ffSKevin Wolf */ 234993f4e2ffSKevin Wolf if (ftruncate(fd, offset) != 0) { 235093f4e2ffSKevin Wolf result = -errno; 235193f4e2ffSKevin Wolf error_setg_errno(errp, -result, "Could not resize file"); 235293f4e2ffSKevin Wolf goto out; 235393f4e2ffSKevin Wolf } 235493f4e2ffSKevin Wolf 235593f4e2ffSKevin Wolf buf = g_malloc0(65536); 235693f4e2ffSKevin Wolf 235793f4e2ffSKevin Wolf seek_result = lseek(fd, current_length, SEEK_SET); 235893f4e2ffSKevin Wolf if (seek_result < 0) { 235993f4e2ffSKevin Wolf result = -errno; 236093f4e2ffSKevin Wolf error_setg_errno(errp, -result, 236193f4e2ffSKevin Wolf "Failed to seek to the old end of file"); 236293f4e2ffSKevin Wolf goto out; 236393f4e2ffSKevin Wolf } 236493f4e2ffSKevin Wolf 236593f4e2ffSKevin Wolf while (left > 0) { 236693f4e2ffSKevin Wolf num = MIN(left, 65536); 236793f4e2ffSKevin Wolf result = write(fd, buf, num); 236893f4e2ffSKevin Wolf if (result < 0) { 2369a1c81f4fSFam Zheng if (errno == EINTR) { 2370a1c81f4fSFam Zheng continue; 2371a1c81f4fSFam Zheng } 237293f4e2ffSKevin Wolf result = -errno; 237393f4e2ffSKevin Wolf error_setg_errno(errp, -result, 237493f4e2ffSKevin Wolf "Could not write zeros for preallocation"); 237593f4e2ffSKevin Wolf goto out; 237693f4e2ffSKevin Wolf } 
237793f4e2ffSKevin Wolf left -= result; 237893f4e2ffSKevin Wolf } 237993f4e2ffSKevin Wolf if (result >= 0) { 238093f4e2ffSKevin Wolf result = fsync(fd); 238193f4e2ffSKevin Wolf if (result < 0) { 238293f4e2ffSKevin Wolf result = -errno; 238393f4e2ffSKevin Wolf error_setg_errno(errp, -result, 238493f4e2ffSKevin Wolf "Could not flush file to disk"); 238593f4e2ffSKevin Wolf goto out; 238693f4e2ffSKevin Wolf } 238793f4e2ffSKevin Wolf } 238893f4e2ffSKevin Wolf goto out; 238993f4e2ffSKevin Wolf } 239093f4e2ffSKevin Wolf case PREALLOC_MODE_OFF: 239193f4e2ffSKevin Wolf if (ftruncate(fd, offset) != 0) { 239293f4e2ffSKevin Wolf result = -errno; 239393f4e2ffSKevin Wolf error_setg_errno(errp, -result, "Could not resize file"); 23943a20013fSNir Soffer } else if (current_length == 0 && offset > current_length) { 23953a20013fSNir Soffer /* Optimize future alignment probing; ignore failures. */ 23963a20013fSNir Soffer allocate_first_block(fd, offset); 239793f4e2ffSKevin Wolf } 239893f4e2ffSKevin Wolf return result; 239993f4e2ffSKevin Wolf default: 240093f4e2ffSKevin Wolf result = -ENOTSUP; 240193f4e2ffSKevin Wolf error_setg(errp, "Unsupported preallocation mode: %s", 2402d57c44d0SKevin Wolf PreallocMode_str(prealloc)); 240393f4e2ffSKevin Wolf return result; 240493f4e2ffSKevin Wolf } 240593f4e2ffSKevin Wolf 240693f4e2ffSKevin Wolf out: 240793f4e2ffSKevin Wolf if (result < 0) { 240893f4e2ffSKevin Wolf if (ftruncate(fd, current_length) < 0) { 240993f4e2ffSKevin Wolf error_report("Failed to restore old file length: %s", 241093f4e2ffSKevin Wolf strerror(errno)); 241193f4e2ffSKevin Wolf } 241293f4e2ffSKevin Wolf } 241393f4e2ffSKevin Wolf 241493f4e2ffSKevin Wolf g_free(buf); 241593f4e2ffSKevin Wolf return result; 241693f4e2ffSKevin Wolf } 241793f4e2ffSKevin Wolf 24180fdb7311SEmanuele Giuseppe Esposito static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg) 24195d5de250SKevin Wolf { 2420aef04fc7SEmanuele Giuseppe Esposito return thread_pool_submit_co(func, arg); 
24215d5de250SKevin Wolf } 24225d5de250SKevin Wolf 2423a7c5f67aSKeith Busch /* 2424a7c5f67aSKeith Busch * Check if all memory in this vector is sector aligned. 2425a7c5f67aSKeith Busch */ 2426a7c5f67aSKeith Busch static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) 2427a7c5f67aSKeith Busch { 2428a7c5f67aSKeith Busch int i; 2429a7c5f67aSKeith Busch size_t alignment = bdrv_min_mem_align(bs); 243025474d90SKeith Busch size_t len = bs->bl.request_alignment; 2431a7c5f67aSKeith Busch IO_CODE(); 2432a7c5f67aSKeith Busch 2433a7c5f67aSKeith Busch for (i = 0; i < qiov->niov; i++) { 2434a7c5f67aSKeith Busch if ((uintptr_t) qiov->iov[i].iov_base % alignment) { 2435a7c5f67aSKeith Busch return false; 2436a7c5f67aSKeith Busch } 243725474d90SKeith Busch if (qiov->iov[i].iov_len % len) { 2438a7c5f67aSKeith Busch return false; 2439a7c5f67aSKeith Busch } 2440a7c5f67aSKeith Busch } 2441a7c5f67aSKeith Busch 2442a7c5f67aSKeith Busch return true; 2443a7c5f67aSKeith Busch } 2444a7c5f67aSKeith Busch 2445c1bb86cdSEric Blake static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, 2446c1bb86cdSEric Blake uint64_t bytes, QEMUIOVector *qiov, int type) 2447c1bb86cdSEric Blake { 2448c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2449999e6b69SKevin Wolf RawPosixAIOData acb; 2450a3c41f06SSam Li int ret; 2451c1bb86cdSEric Blake 2452c1bb86cdSEric Blake if (fd_open(bs) < 0) 2453c1bb86cdSEric Blake return -EIO; 2454a3c41f06SSam Li #if defined(CONFIG_BLKZONED) 24554751d09aSSam Li if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && bs->wps) { 2456a3c41f06SSam Li qemu_co_mutex_lock(&bs->wps->colock); 24574751d09aSSam Li if (type & QEMU_AIO_ZONE_APPEND && bs->bl.zone_size) { 24584751d09aSSam Li int index = offset / bs->bl.zone_size; 24594751d09aSSam Li offset = bs->wps->wp[index]; 24604751d09aSSam Li } 2461a3c41f06SSam Li } 2462a3c41f06SSam Li #endif 2463c1bb86cdSEric Blake 2464c1bb86cdSEric Blake /* 2465c6447510SAarushi Mehta * When using O_DIRECT, the request 
must be aligned to be able to use 2466c6447510SAarushi Mehta * either libaio or io_uring interface. If not fail back to regular thread 2467c6447510SAarushi Mehta * pool read/write code which emulates this for us if we 2468c6447510SAarushi Mehta * set QEMU_AIO_MISALIGNED. 2469c1bb86cdSEric Blake */ 2470c6447510SAarushi Mehta if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) { 2471c1bb86cdSEric Blake type |= QEMU_AIO_MISALIGNED; 2472c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING 2473c6447510SAarushi Mehta } else if (s->use_linux_io_uring) { 2474c6447510SAarushi Mehta assert(qiov->size == bytes); 2475a3c41f06SSam Li ret = luring_co_submit(bs, s->fd, offset, qiov, type); 2476a3c41f06SSam Li goto out; 2477c6447510SAarushi Mehta #endif 2478c1bb86cdSEric Blake #ifdef CONFIG_LINUX_AIO 2479c1bb86cdSEric Blake } else if (s->use_linux_aio) { 2480c1bb86cdSEric Blake assert(qiov->size == bytes); 2481a3c41f06SSam Li ret = laio_co_submit(s->fd, offset, qiov, type, 2482a3c41f06SSam Li s->aio_max_batch); 2483a3c41f06SSam Li goto out; 2484c1bb86cdSEric Blake #endif 2485c1bb86cdSEric Blake } 2486c1bb86cdSEric Blake 2487999e6b69SKevin Wolf acb = (RawPosixAIOData) { 2488999e6b69SKevin Wolf .bs = bs, 2489999e6b69SKevin Wolf .aio_fildes = s->fd, 2490999e6b69SKevin Wolf .aio_type = type, 2491999e6b69SKevin Wolf .aio_offset = offset, 2492999e6b69SKevin Wolf .aio_nbytes = bytes, 2493999e6b69SKevin Wolf .io = { 2494999e6b69SKevin Wolf .iov = qiov->iov, 2495999e6b69SKevin Wolf .niov = qiov->niov, 2496999e6b69SKevin Wolf }, 2497999e6b69SKevin Wolf }; 2498999e6b69SKevin Wolf 2499999e6b69SKevin Wolf assert(qiov->size == bytes); 2500a3c41f06SSam Li ret = raw_thread_pool_submit(handle_aiocb_rw, &acb); 2501a3c41f06SSam Li goto out; /* Avoid the compiler err of unused label */ 2502a3c41f06SSam Li 2503a3c41f06SSam Li out: 2504a3c41f06SSam Li #if defined(CONFIG_BLKZONED) 2505a3c41f06SSam Li { 2506a3c41f06SSam Li BlockZoneWps *wps = bs->wps; 2507a3c41f06SSam Li if (ret == 0) { 
25084751d09aSSam Li if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) 25094751d09aSSam Li && wps && bs->bl.zone_size) { 2510a3c41f06SSam Li uint64_t *wp = &wps->wp[offset / bs->bl.zone_size]; 2511a3c41f06SSam Li if (!BDRV_ZT_IS_CONV(*wp)) { 25124751d09aSSam Li if (type & QEMU_AIO_ZONE_APPEND) { 25134751d09aSSam Li *s->offset = *wp; 25146c811e19SSam Li trace_zbd_zone_append_complete(bs, *s->offset 25156c811e19SSam Li >> BDRV_SECTOR_BITS); 25164751d09aSSam Li } 2517a3c41f06SSam Li /* Advance the wp if needed */ 2518a3c41f06SSam Li if (offset + bytes > *wp) { 2519a3c41f06SSam Li *wp = offset + bytes; 2520a3c41f06SSam Li } 2521a3c41f06SSam Li } 2522a3c41f06SSam Li } 2523a3c41f06SSam Li } else { 25244751d09aSSam Li if (type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) { 2525a3c41f06SSam Li update_zones_wp(bs, s->fd, 0, 1); 2526a3c41f06SSam Li } 2527a3c41f06SSam Li } 2528a3c41f06SSam Li 25294751d09aSSam Li if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && wps) { 2530a3c41f06SSam Li qemu_co_mutex_unlock(&wps->colock); 2531a3c41f06SSam Li } 2532a3c41f06SSam Li } 2533a3c41f06SSam Li #endif 2534a3c41f06SSam Li return ret; 2535c1bb86cdSEric Blake } 2536c1bb86cdSEric Blake 2537f7ef38ddSVladimir Sementsov-Ogievskiy static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, 2538f7ef38ddSVladimir Sementsov-Ogievskiy int64_t bytes, QEMUIOVector *qiov, 2539f7ef38ddSVladimir Sementsov-Ogievskiy BdrvRequestFlags flags) 2540c1bb86cdSEric Blake { 2541c1bb86cdSEric Blake return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ); 2542c1bb86cdSEric Blake } 2543c1bb86cdSEric Blake 2544e75abedaSVladimir Sementsov-Ogievskiy static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset, 2545e75abedaSVladimir Sementsov-Ogievskiy int64_t bytes, QEMUIOVector *qiov, 2546e75abedaSVladimir Sementsov-Ogievskiy BdrvRequestFlags flags) 2547c1bb86cdSEric Blake { 2548c1bb86cdSEric Blake return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE); 2549c1bb86cdSEric 
Blake } 2550c1bb86cdSEric Blake 2551dda56b75SPaolo Bonzini static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) 2552c1bb86cdSEric Blake { 2553c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 255406dc9bd5SKevin Wolf RawPosixAIOData acb; 255533d70fb6SKevin Wolf int ret; 2556c1bb86cdSEric Blake 255733d70fb6SKevin Wolf ret = fd_open(bs); 255833d70fb6SKevin Wolf if (ret < 0) { 255933d70fb6SKevin Wolf return ret; 256033d70fb6SKevin Wolf } 2561c1bb86cdSEric Blake 256206dc9bd5SKevin Wolf acb = (RawPosixAIOData) { 256306dc9bd5SKevin Wolf .bs = bs, 256406dc9bd5SKevin Wolf .aio_fildes = s->fd, 256506dc9bd5SKevin Wolf .aio_type = QEMU_AIO_FLUSH, 256606dc9bd5SKevin Wolf }; 256706dc9bd5SKevin Wolf 2568c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING 2569c6447510SAarushi Mehta if (s->use_linux_io_uring) { 2570a75e4e43SEmanuele Giuseppe Esposito return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH); 2571c6447510SAarushi Mehta } 2572c6447510SAarushi Mehta #endif 25730fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_flush, &acb); 2574c1bb86cdSEric Blake } 2575c1bb86cdSEric Blake 2576ed6e2161SNishanth Aravamudan static void raw_aio_attach_aio_context(BlockDriverState *bs, 2577ed6e2161SNishanth Aravamudan AioContext *new_context) 2578ed6e2161SNishanth Aravamudan { 2579c6447510SAarushi Mehta BDRVRawState __attribute__((unused)) *s = bs->opaque; 2580ed6e2161SNishanth Aravamudan #ifdef CONFIG_LINUX_AIO 2581ed6e2161SNishanth Aravamudan if (s->use_linux_aio) { 2582cb09104eSMarkus Armbruster Error *local_err = NULL; 2583ed6e2161SNishanth Aravamudan if (!aio_setup_linux_aio(new_context, &local_err)) { 2584ed6e2161SNishanth Aravamudan error_reportf_err(local_err, "Unable to use native AIO, " 2585ed6e2161SNishanth Aravamudan "falling back to thread pool: "); 2586ed6e2161SNishanth Aravamudan s->use_linux_aio = false; 2587ed6e2161SNishanth Aravamudan } 2588ed6e2161SNishanth Aravamudan } 2589ed6e2161SNishanth Aravamudan #endif 
2590c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING 2591c6447510SAarushi Mehta if (s->use_linux_io_uring) { 2592cb8d0851SPan Nengyuan Error *local_err = NULL; 2593c6447510SAarushi Mehta if (!aio_setup_linux_io_uring(new_context, &local_err)) { 2594c6447510SAarushi Mehta error_reportf_err(local_err, "Unable to use linux io_uring, " 2595c6447510SAarushi Mehta "falling back to thread pool: "); 2596c6447510SAarushi Mehta s->use_linux_io_uring = false; 2597c6447510SAarushi Mehta } 2598c6447510SAarushi Mehta } 2599c6447510SAarushi Mehta #endif 2600ed6e2161SNishanth Aravamudan } 2601ed6e2161SNishanth Aravamudan 2602c1bb86cdSEric Blake static void raw_close(BlockDriverState *bs) 2603c1bb86cdSEric Blake { 2604c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2605c1bb86cdSEric Blake 2606c1bb86cdSEric Blake if (s->fd >= 0) { 2607a3c41f06SSam Li #if defined(CONFIG_BLKZONED) 2608a3c41f06SSam Li g_free(bs->wps); 2609a3c41f06SSam Li #endif 2610c1bb86cdSEric Blake qemu_close(s->fd); 2611c1bb86cdSEric Blake s->fd = -1; 2612c1bb86cdSEric Blake } 2613c1bb86cdSEric Blake } 2614c1bb86cdSEric Blake 2615d0bc9e5dSMax Reitz /** 2616d0bc9e5dSMax Reitz * Truncates the given regular file @fd to @offset and, when growing, fills the 2617d0bc9e5dSMax Reitz * new space according to @prealloc. 2618d0bc9e5dSMax Reitz * 2619d0bc9e5dSMax Reitz * Returns: 0 on success, -errno on failure. 
2620d0bc9e5dSMax Reitz */ 262193f4e2ffSKevin Wolf static int coroutine_fn 262293f4e2ffSKevin Wolf raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset, 262393f4e2ffSKevin Wolf PreallocMode prealloc, Error **errp) 26249f63b07eSMax Reitz { 262529cb4c01SKevin Wolf RawPosixAIOData acb; 2626d0bc9e5dSMax Reitz 262729cb4c01SKevin Wolf acb = (RawPosixAIOData) { 262893f4e2ffSKevin Wolf .bs = bs, 262993f4e2ffSKevin Wolf .aio_fildes = fd, 263093f4e2ffSKevin Wolf .aio_type = QEMU_AIO_TRUNCATE, 263193f4e2ffSKevin Wolf .aio_offset = offset, 2632d57c44d0SKevin Wolf .truncate = { 263393f4e2ffSKevin Wolf .prealloc = prealloc, 263493f4e2ffSKevin Wolf .errp = errp, 2635d57c44d0SKevin Wolf }, 263693f4e2ffSKevin Wolf }; 2637d0bc9e5dSMax Reitz 26380fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_truncate, &acb); 26399f63b07eSMax Reitz } 26409f63b07eSMax Reitz 2641061ca8a3SKevin Wolf static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset, 2642c80d8b06SMax Reitz bool exact, PreallocMode prealloc, 264392b92799SKevin Wolf BdrvRequestFlags flags, Error **errp) 2644c1bb86cdSEric Blake { 2645c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2646c1bb86cdSEric Blake struct stat st; 2647f59adb32SMax Reitz int ret; 2648c1bb86cdSEric Blake 2649c1bb86cdSEric Blake if (fstat(s->fd, &st)) { 2650f59adb32SMax Reitz ret = -errno; 2651f59adb32SMax Reitz error_setg_errno(errp, -ret, "Failed to fstat() the file"); 2652f59adb32SMax Reitz return ret; 2653c1bb86cdSEric Blake } 2654c1bb86cdSEric Blake 2655c1bb86cdSEric Blake if (S_ISREG(st.st_mode)) { 265682325ae5SMax Reitz /* Always resizes to the exact @offset */ 265793f4e2ffSKevin Wolf return raw_regular_truncate(bs, s->fd, offset, prealloc, errp); 2658c1bb86cdSEric Blake } 265935d72602SMax Reitz 266035d72602SMax Reitz if (prealloc != PREALLOC_MODE_OFF) { 266135d72602SMax Reitz error_setg(errp, "Preallocation mode '%s' unsupported for this " 2662977c736fSMarkus Armbruster "non-regular file", 
PreallocMode_str(prealloc)); 266335d72602SMax Reitz return -ENOTSUP; 266435d72602SMax Reitz } 266535d72602SMax Reitz 266635d72602SMax Reitz if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 266736c6c877SPaolo Bonzini int64_t cur_length = raw_getlength(bs); 266882325ae5SMax Reitz 266982325ae5SMax Reitz if (offset != cur_length && exact) { 267082325ae5SMax Reitz error_setg(errp, "Cannot resize device files"); 267182325ae5SMax Reitz return -ENOTSUP; 267282325ae5SMax Reitz } else if (offset > cur_length) { 2673f59adb32SMax Reitz error_setg(errp, "Cannot grow device files"); 2674c1bb86cdSEric Blake return -EINVAL; 2675c1bb86cdSEric Blake } 2676c1bb86cdSEric Blake } else { 2677f59adb32SMax Reitz error_setg(errp, "Resizing this file is not supported"); 2678c1bb86cdSEric Blake return -ENOTSUP; 2679c1bb86cdSEric Blake } 2680c1bb86cdSEric Blake 2681c1bb86cdSEric Blake return 0; 2682c1bb86cdSEric Blake } 2683c1bb86cdSEric Blake 2684c1bb86cdSEric Blake #ifdef __OpenBSD__ 268536c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs) 2686c1bb86cdSEric Blake { 2687c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2688c1bb86cdSEric Blake int fd = s->fd; 2689c1bb86cdSEric Blake struct stat st; 2690c1bb86cdSEric Blake 2691c1bb86cdSEric Blake if (fstat(fd, &st)) 2692c1bb86cdSEric Blake return -errno; 2693c1bb86cdSEric Blake if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 2694c1bb86cdSEric Blake struct disklabel dl; 2695c1bb86cdSEric Blake 2696c1bb86cdSEric Blake if (ioctl(fd, DIOCGDINFO, &dl)) 2697c1bb86cdSEric Blake return -errno; 2698c1bb86cdSEric Blake return (uint64_t)dl.d_secsize * 2699c1bb86cdSEric Blake dl.d_partitions[DISKPART(st.st_rdev)].p_size; 2700c1bb86cdSEric Blake } else 2701c1bb86cdSEric Blake return st.st_size; 2702c1bb86cdSEric Blake } 2703c1bb86cdSEric Blake #elif defined(__NetBSD__) 270436c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs) 2705c1bb86cdSEric Blake { 2706c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 
2707c1bb86cdSEric Blake int fd = s->fd; 2708c1bb86cdSEric Blake struct stat st; 2709c1bb86cdSEric Blake 2710c1bb86cdSEric Blake if (fstat(fd, &st)) 2711c1bb86cdSEric Blake return -errno; 2712c1bb86cdSEric Blake if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { 2713c1bb86cdSEric Blake struct dkwedge_info dkw; 2714c1bb86cdSEric Blake 2715c1bb86cdSEric Blake if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) { 2716c1bb86cdSEric Blake return dkw.dkw_size * 512; 2717c1bb86cdSEric Blake } else { 2718c1bb86cdSEric Blake struct disklabel dl; 2719c1bb86cdSEric Blake 2720c1bb86cdSEric Blake if (ioctl(fd, DIOCGDINFO, &dl)) 2721c1bb86cdSEric Blake return -errno; 2722c1bb86cdSEric Blake return (uint64_t)dl.d_secsize * 2723c1bb86cdSEric Blake dl.d_partitions[DISKPART(st.st_rdev)].p_size; 2724c1bb86cdSEric Blake } 2725c1bb86cdSEric Blake } else 2726c1bb86cdSEric Blake return st.st_size; 2727c1bb86cdSEric Blake } 2728c1bb86cdSEric Blake #elif defined(__sun__) 272936c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs) 2730c1bb86cdSEric Blake { 2731c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2732c1bb86cdSEric Blake struct dk_minfo minfo; 2733c1bb86cdSEric Blake int ret; 2734c1bb86cdSEric Blake int64_t size; 2735c1bb86cdSEric Blake 2736c1bb86cdSEric Blake ret = fd_open(bs); 2737c1bb86cdSEric Blake if (ret < 0) { 2738c1bb86cdSEric Blake return ret; 2739c1bb86cdSEric Blake } 2740c1bb86cdSEric Blake 2741c1bb86cdSEric Blake /* 2742c1bb86cdSEric Blake * Use the DKIOCGMEDIAINFO ioctl to read the size. 2743c1bb86cdSEric Blake */ 2744c1bb86cdSEric Blake ret = ioctl(s->fd, DKIOCGMEDIAINFO, &minfo); 2745c1bb86cdSEric Blake if (ret != -1) { 2746c1bb86cdSEric Blake return minfo.dki_lbsize * minfo.dki_capacity; 2747c1bb86cdSEric Blake } 2748c1bb86cdSEric Blake 2749c1bb86cdSEric Blake /* 2750c1bb86cdSEric Blake * There are reports that lseek on some devices fails, but 2751c1bb86cdSEric Blake * irc discussion said that contingency on contingency was overkill. 
2752c1bb86cdSEric Blake */ 2753c1bb86cdSEric Blake size = lseek(s->fd, 0, SEEK_END); 2754c1bb86cdSEric Blake if (size < 0) { 2755c1bb86cdSEric Blake return -errno; 2756c1bb86cdSEric Blake } 2757c1bb86cdSEric Blake return size; 2758c1bb86cdSEric Blake } 2759c1bb86cdSEric Blake #elif defined(CONFIG_BSD) 276036c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs) 2761c1bb86cdSEric Blake { 2762c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2763c1bb86cdSEric Blake int fd = s->fd; 2764c1bb86cdSEric Blake int64_t size; 2765c1bb86cdSEric Blake struct stat sb; 2766c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 2767c1bb86cdSEric Blake int reopened = 0; 2768c1bb86cdSEric Blake #endif 2769c1bb86cdSEric Blake int ret; 2770c1bb86cdSEric Blake 2771c1bb86cdSEric Blake ret = fd_open(bs); 2772c1bb86cdSEric Blake if (ret < 0) 2773c1bb86cdSEric Blake return ret; 2774c1bb86cdSEric Blake 2775c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 2776c1bb86cdSEric Blake again: 2777c1bb86cdSEric Blake #endif 2778c1bb86cdSEric Blake if (!fstat(fd, &sb) && (S_IFCHR & sb.st_mode)) { 2779267cd53fSPaolo Bonzini size = 0; 2780c1bb86cdSEric Blake #ifdef DIOCGMEDIASIZE 2781267cd53fSPaolo Bonzini if (ioctl(fd, DIOCGMEDIASIZE, (off_t *)&size)) { 2782c1bb86cdSEric Blake size = 0; 2783c1bb86cdSEric Blake } 2784267cd53fSPaolo Bonzini #endif 2785267cd53fSPaolo Bonzini #ifdef DIOCGPART 2786267cd53fSPaolo Bonzini if (size == 0) { 2787267cd53fSPaolo Bonzini struct partinfo pi; 2788267cd53fSPaolo Bonzini if (ioctl(fd, DIOCGPART, &pi) == 0) { 2789267cd53fSPaolo Bonzini size = pi.media_size; 2790267cd53fSPaolo Bonzini } 2791267cd53fSPaolo Bonzini } 2792c1bb86cdSEric Blake #endif 279309e20abdSJoelle van Dyne #if defined(DKIOCGETBLOCKCOUNT) && defined(DKIOCGETBLOCKSIZE) 2794267cd53fSPaolo Bonzini if (size == 0) { 2795c1bb86cdSEric Blake uint64_t sectors = 0; 2796c1bb86cdSEric Blake uint32_t sector_size = 0; 2797c1bb86cdSEric Blake 
2798c1bb86cdSEric Blake if (ioctl(fd, DKIOCGETBLOCKCOUNT, §ors) == 0 2799c1bb86cdSEric Blake && ioctl(fd, DKIOCGETBLOCKSIZE, §or_size) == 0) { 2800c1bb86cdSEric Blake size = sectors * sector_size; 2801c1bb86cdSEric Blake } 2802c1bb86cdSEric Blake } 2803c1bb86cdSEric Blake #endif 2804267cd53fSPaolo Bonzini if (size == 0) { 2805267cd53fSPaolo Bonzini size = lseek(fd, 0LL, SEEK_END); 2806267cd53fSPaolo Bonzini } 2807267cd53fSPaolo Bonzini if (size < 0) { 2808267cd53fSPaolo Bonzini return -errno; 2809267cd53fSPaolo Bonzini } 2810c1bb86cdSEric Blake #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 2811c1bb86cdSEric Blake switch(s->type) { 2812c1bb86cdSEric Blake case FTYPE_CD: 2813c1bb86cdSEric Blake /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */ 2814c1bb86cdSEric Blake if (size == 2048LL * (unsigned)-1) 2815c1bb86cdSEric Blake size = 0; 2816c1bb86cdSEric Blake /* XXX no disc? maybe we need to reopen... */ 2817c1bb86cdSEric Blake if (size <= 0 && !reopened && cdrom_reopen(bs) >= 0) { 2818c1bb86cdSEric Blake reopened = 1; 2819c1bb86cdSEric Blake goto again; 2820c1bb86cdSEric Blake } 2821c1bb86cdSEric Blake } 2822c1bb86cdSEric Blake #endif 2823c1bb86cdSEric Blake } else { 2824c1bb86cdSEric Blake size = lseek(fd, 0, SEEK_END); 2825c1bb86cdSEric Blake if (size < 0) { 2826c1bb86cdSEric Blake return -errno; 2827c1bb86cdSEric Blake } 2828c1bb86cdSEric Blake } 2829c1bb86cdSEric Blake return size; 2830c1bb86cdSEric Blake } 2831c1bb86cdSEric Blake #else 283236c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs) 2833c1bb86cdSEric Blake { 2834c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2835c1bb86cdSEric Blake int ret; 2836c1bb86cdSEric Blake int64_t size; 2837c1bb86cdSEric Blake 2838c1bb86cdSEric Blake ret = fd_open(bs); 2839c1bb86cdSEric Blake if (ret < 0) { 2840c1bb86cdSEric Blake return ret; 2841c1bb86cdSEric Blake } 2842c1bb86cdSEric Blake 2843c1bb86cdSEric Blake size = lseek(s->fd, 0, SEEK_END); 2844c1bb86cdSEric Blake if 
(size < 0) { 2845c1bb86cdSEric Blake return -errno; 2846c1bb86cdSEric Blake } 2847c1bb86cdSEric Blake return size; 2848c1bb86cdSEric Blake } 2849c1bb86cdSEric Blake #endif 2850c1bb86cdSEric Blake 285136c6c877SPaolo Bonzini static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) 285236c6c877SPaolo Bonzini { 285336c6c877SPaolo Bonzini return raw_getlength(bs); 285436c6c877SPaolo Bonzini } 285536c6c877SPaolo Bonzini 285682618d7bSEmanuele Giuseppe Esposito static int64_t coroutine_fn raw_co_get_allocated_file_size(BlockDriverState *bs) 2857c1bb86cdSEric Blake { 2858c1bb86cdSEric Blake struct stat st; 2859c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 2860c1bb86cdSEric Blake 2861c1bb86cdSEric Blake if (fstat(s->fd, &st) < 0) { 2862c1bb86cdSEric Blake return -errno; 2863c1bb86cdSEric Blake } 2864c1bb86cdSEric Blake return (int64_t)st.st_blocks * 512; 2865c1bb86cdSEric Blake } 2866c1bb86cdSEric Blake 286793f4e2ffSKevin Wolf static int coroutine_fn 286893f4e2ffSKevin Wolf raw_co_create(BlockdevCreateOptions *options, Error **errp) 2869c1bb86cdSEric Blake { 2870927f11e1SKevin Wolf BlockdevCreateOptionsFile *file_opts; 28717c20c808SMax Reitz Error *local_err = NULL; 2872c1bb86cdSEric Blake int fd; 2873d815efcaSMax Reitz uint64_t perm, shared; 2874c1bb86cdSEric Blake int result = 0; 2875c1bb86cdSEric Blake 2876927f11e1SKevin Wolf /* Validate options and set default values */ 2877927f11e1SKevin Wolf assert(options->driver == BLOCKDEV_DRIVER_FILE); 2878927f11e1SKevin Wolf file_opts = &options->u.file; 2879c1bb86cdSEric Blake 2880927f11e1SKevin Wolf if (!file_opts->has_nocow) { 2881927f11e1SKevin Wolf file_opts->nocow = false; 2882927f11e1SKevin Wolf } 2883927f11e1SKevin Wolf if (!file_opts->has_preallocation) { 2884927f11e1SKevin Wolf file_opts->preallocation = PREALLOC_MODE_OFF; 2885c1bb86cdSEric Blake } 2886ffa244c8SKevin Wolf if (!file_opts->has_extent_size_hint) { 2887ffa244c8SKevin Wolf file_opts->extent_size_hint = 1 * MiB; 2888ffa244c8SKevin Wolf } 
    if (file_opts->extent_size_hint > UINT32_MAX) {
        result = -EINVAL;
        error_setg(errp, "Extent size hint is too large");
        goto out;
    }

    /* Create file */
    fd = qemu_create(file_opts->filename, O_RDWR | O_BINARY, 0644, errp);
    if (fd < 0) {
        result = -errno;
        goto out;
    }

    /* Take permissions: We want to discard everything, so we need
     * BLK_PERM_WRITE; and truncation to the desired size requires
     * BLK_PERM_RESIZE.
     * On the other hand, we cannot share the RESIZE permission
     * because we promise that after this function, the file has the
     * size given in the options.  If someone else were to resize it
     * concurrently, we could not guarantee that.
     * Note that after this function, we can no longer guarantee that
     * the file is not touched by a third party, so it may be resized
     * then. */
    perm = BLK_PERM_WRITE | BLK_PERM_RESIZE;
    shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;

    /* Step one: Take locks */
    result = raw_apply_lock_bytes(NULL, fd, perm, ~shared, false, errp);
    if (result < 0) {
        goto out_close;
    }

    /* Step two: Check that nobody else has taken conflicting locks */
    result = raw_check_lock_bytes(fd, perm, shared, errp);
    if (result < 0) {
        error_append_hint(errp,
                          "Is another process using the image [%s]?\n",
                          file_opts->filename);
        goto out_unlock;
    }

    /* Clear the file by truncating it to 0 */
    result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp);
    if (result < 0) {
        goto out_unlock;
    }

    if (file_opts->nocow) {
#ifdef __linux__
        /* Set NOCOW flag to solve performance issue on fs like btrfs.
         * This is an optimisation.  The FS_IOC_SETFLAGS ioctl return value
         * will be ignored since any failure of this operation should not
         * block the remaining work.
         */
        int attr;
        if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) {
            attr |= FS_NOCOW_FL;
            ioctl(fd, FS_IOC_SETFLAGS, &attr);
        }
#endif
    }
#ifdef FS_IOC_FSSETXATTR
    /*
     * Try to set the extent size hint.  Failure is not fatal, and a warning is
     * only printed if the option was explicitly specified.
     */
    {
        struct fsxattr attr;
        result = ioctl(fd, FS_IOC_FSGETXATTR, &attr);
        if (result == 0) {
            attr.fsx_xflags |= FS_XFLAG_EXTSIZE;
            attr.fsx_extsize = file_opts->extent_size_hint;
            result = ioctl(fd, FS_IOC_FSSETXATTR, &attr);
        }
        if (result < 0 && file_opts->has_extent_size_hint &&
            file_opts->extent_size_hint)
        {
            warn_report("Failed to set extent size hint: %s",
                        strerror(errno));
        }
    }
#endif

    /* Resize and potentially preallocate the file to the desired
     * final size */
    result = raw_regular_truncate(NULL, fd, file_opts->size,
                                  file_opts->preallocation, errp);
    if (result < 0) {
        goto out_unlock;
    }

out_unlock:
    raw_apply_lock_bytes(NULL, fd, 0, 0, true, &local_err);
    if (local_err) {
        /* The above call should not fail, and if it does, that does
         * not mean the whole creation operation has failed.  So
         * report it to the user for their convenience, but do not report
         * it to the caller. */
        warn_report_err(local_err);
    }

out_close:
    if (qemu_close(fd) != 0 && result == 0) {
        result = -errno;
        error_setg_errno(errp, -result, "Could not close the new file");
    }
out:
    return result;
}

/*
 * Legacy -drive / qemu-img style creation entry point: translate the
 * QemuOpts key/value options into a BlockdevCreateOptions structure and
 * delegate to raw_co_create().
 */
static int coroutine_fn GRAPH_RDLOCK
raw_co_create_opts(BlockDriver *drv, const char *filename,
                   QemuOpts *opts, Error **errp)
{
    BlockdevCreateOptions options;
    int64_t total_size = 0;
    int64_t extent_size_hint = 0;
    bool has_extent_size_hint = false;
    bool nocow = false;
    PreallocMode prealloc;
    char *buf = NULL;
    Error *local_err = NULL;

    /* Skip file: protocol prefix */
    strstart(filename, "file:", &filename);

    /* Read out options; size is rounded up to a whole sector */
    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    if (qemu_opt_get(opts, BLOCK_OPT_EXTENT_SIZE_HINT)) {
        has_extent_size_hint = true;
        extent_size_hint =
            qemu_opt_get_size_del(opts, BLOCK_OPT_EXTENT_SIZE_HINT, -1);
    }
    nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false);
    buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    prealloc = qapi_enum_parse(&PreallocMode_lookup, buf,
                               PREALLOC_MODE_OFF, &local_err);
    g_free(buf);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }

    options = (BlockdevCreateOptions) {
        .driver = BLOCKDEV_DRIVER_FILE,
        .u.file = {
            .filename = (char *) filename,
            .size = total_size,
            .has_preallocation = true,
            .preallocation = prealloc,
            .has_nocow = true,
            .nocow = nocow,
            .has_extent_size_hint = has_extent_size_hint,
            .extent_size_hint = extent_size_hint,
        },
    };
    return raw_co_create(&options, errp);
}

/*
 * Delete the image file backing @bs.  Refuses (with -ENOENT) if the path
 * does not name a regular file.
 */
static int coroutine_fn raw_co_delete_file(BlockDriverState *bs,
                                           Error **errp)
{
    struct stat st;
    int ret;

    if (!(stat(bs->filename, &st) == 0) || !S_ISREG(st.st_mode)) {
        error_setg_errno(errp, ENOENT, "%s is not a regular file",
                         bs->filename);
        return -ENOENT;
    }

    ret = unlink(bs->filename);
    if (ret < 0) {
        ret = -errno;
        error_setg_errno(errp, -ret, "Error when deleting file %s",
                         bs->filename);
    }

    return ret;
}

/*
 * Find allocation range in @bs around offset @start.
 * May change underlying file descriptor's file offset.
 * If @start is not in a hole, store @start in @data, and the
 * beginning of the next hole in @hole, and return 0.
 * If @start is in a non-trailing hole, store @start in @hole and the
 * beginning of the next non-hole in @data, and return 0.
 * If @start is in a trailing hole or beyond EOF, return -ENXIO.
 * If we can't find out, return a negative errno other than -ENXIO.
 */
static int find_allocation(BlockDriverState *bs, off_t start,
                           off_t *data, off_t *hole)
{
#if defined SEEK_HOLE && defined SEEK_DATA
    BDRVRawState *s = bs->opaque;
    off_t offs;

    /*
     * SEEK_DATA cases:
     * D1. offs == start: start is in data
     * D2. offs > start: start is in a hole, next data at offs
     * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
     *     or start is beyond EOF
     *     If the latter happens, the file has been truncated behind
     *     our back since we opened it.  All bets are off then.
     *     Treating like a trailing hole is simplest.
     * D4. offs < 0, errno != ENXIO: we learned nothing
     */
    offs = lseek(s->fd, start, SEEK_DATA);
    if (offs < 0) {
        return -errno;          /* D3 or D4 */
    }

    if (offs < start) {
        /* This is not a valid return by lseek().  We are safe to just return
         * -EIO in this case, and we'll treat it like D4. */
        return -EIO;
    }

    if (offs > start) {
        /* D2: in hole, next data at offs */
        *hole = start;
        *data = offs;
        return 0;
    }

    /* D1: in data, end not yet known */

    /*
     * SEEK_HOLE cases:
     * H1. offs == start: start is in a hole
     *     If this happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H2. offs > start: either start is in data, next hole at offs,
     *     or start is in trailing hole, EOF at offs
     *     Linux treats trailing holes like any other hole: offs ==
     *     start.  Solaris seeks to EOF instead: offs > start (blech).
     *     If that happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H3. offs < 0, errno = ENXIO: start is beyond EOF
     *     If this happens, the file has been truncated behind our
     *     back since we opened it.  Treat it like a trailing hole.
     * H4. offs < 0, errno != ENXIO: we learned nothing
     *     Pretend we know nothing at all, i.e. "forget" about D1.
     */
    offs = lseek(s->fd, start, SEEK_HOLE);
    if (offs < 0) {
        return -errno;          /* D1 and (H3 or H4) */
    }

    if (offs < start) {
        /* This is not a valid return by lseek().  We are safe to just return
         * -EIO in this case, and we'll treat it like H4. */
        return -EIO;
    }

    if (offs > start) {
        /*
         * D1 and H2: either in data, next hole at offs, or it was in
         * data but is now in a trailing hole.  In the latter case,
         * all bets are off.  Treating it as if it there was data all
         * the way to EOF is safe, so simply do that.
         */
        *data = start;
        *hole = offs;
        return 0;
    }

    /* D1 and H1 */
    return -EBUSY;
#else
    return -ENOTSUP;
#endif
}

/*
 * Returns the allocation status of the specified offset.
 *
 * The block layer guarantees 'offset' and 'bytes' are within bounds.
 *
 * 'pnum' is set to the number of bytes (including and immediately following
 * the specified offset) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'bytes' is a soft cap for 'pnum'.  If the information is free, 'pnum' may
 * well exceed it.
 */
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
                                            bool want_zero,
                                            int64_t offset,
                                            int64_t bytes, int64_t *pnum,
                                            int64_t *map,
                                            BlockDriverState **file)
{
    off_t data = 0, hole = 0;
    int ret;

    assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    if (!want_zero) {
        /* Caller only cares about allocation, not zeroes: report the whole
         * range as data without probing the file */
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    ret = find_allocation(bs, offset, &data, &hole);
    if (ret == -ENXIO) {
        /* Trailing hole */
        *pnum = bytes;
        ret = BDRV_BLOCK_ZERO;
    } else if (ret < 0) {
        /* No info available, so pretend there are no holes */
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA;
    } else if (data == offset) {
        /* On a data extent, compute bytes to the end of the extent,
         * possibly including a partial sector at EOF. */
        *pnum = hole - offset;

        /*
         * We are not allowed to return partial sectors, though, so
         * round up if necessary.
         */
        if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
            int64_t file_length = raw_getlength(bs);
            if (file_length > 0) {
                /* Ignore errors, this is just a safeguard */
                assert(hole == file_length);
            }
            *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
        }

        ret = BDRV_BLOCK_DATA;
    } else {
        /* On a hole, compute bytes to the beginning of the next extent.  */
        assert(hole == offset);
        *pnum = data - offset;
        ret = BDRV_BLOCK_ZERO;
    }
    *map = offset;
    *file = bs;
    return ret | BDRV_BLOCK_OFFSET_VALID;
}

#if defined(__linux__)
/* Verify that the file is not in the page cache.
 * Scans the file in 128 MiB mmap windows and uses mincore(2) to check
 * whether any page is still resident; sets @errp on the first hit or on
 * any mmap/mincore failure. */
static void check_cache_dropped(BlockDriverState *bs, Error **errp)
{
    const size_t window_size = 128 * 1024 * 1024;
    BDRVRawState *s = bs->opaque;
    void *window = NULL;
    size_t length = 0;
    unsigned char *vec;
    size_t page_size;
    off_t offset;
    off_t end;

    /* mincore(2) page status information requires 1 byte per page */
    page_size = sysconf(_SC_PAGESIZE);
    vec = g_malloc(DIV_ROUND_UP(window_size, page_size));

    end = raw_getlength(bs);

    for (offset = 0; offset < end; offset += window_size) {
        void *new_window;
        size_t new_length;
        size_t vec_end;
        size_t i;
        int ret;

        /* Unmap previous window if size has changed */
        new_length = MIN(end - offset, window_size);
        if (new_length != length) {
            munmap(window, length);
            window = NULL;
            length = 0;
        }

        new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE,
                          s->fd, offset);
        if (new_window == MAP_FAILED) {
            error_setg_errno(errp, errno, "mmap failed");
            break;
        }

        window = new_window;
        length = new_length;

        ret = mincore(window, length, vec);
        if (ret < 0) {
            error_setg_errno(errp, errno, "mincore failed");
            break;
        }

        /* Any set low bit in vec[] means the page is resident in RAM */
        vec_end = DIV_ROUND_UP(length, page_size);
        for (i = 0; i < vec_end; i++) {
            if (vec[i] & 0x1) {
                break;
            }
        }
        if (i < vec_end) {
            error_setg(errp, "page cache still in use!");
            break;
        }
    }

    if (window) {
        munmap(window, length);
    }

    g_free(vec);
}
#endif /* __linux__ */

static void coroutine_fn GRAPH_RDLOCK
raw_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "The file descriptor is not open");
        return;
    }

    if (!s->drop_cache) {
        return;
    }

    if (s->open_flags & O_DIRECT) {
        return; /* No host kernel page cache */
    }

#if defined(__linux__)
    /* This sets the scene for the next syscall... */
    ret = bdrv_co_flush(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "flush failed");
        return;
    }

    /* Linux does not invalidate pages that are dirty, locked, or mmapped by a
     * process.  These limitations are okay because we just fsynced the file,
     * we don't use mmap, and the file should not be in use by other processes.
     */
    ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED);
    if (ret != 0) { /* the return value is a positive errno */
        error_setg_errno(errp, ret, "fadvise failed");
        return;
    }

    if (s->check_cache_dropped) {
        check_cache_dropped(bs, errp);
    }
#else /* __linux__ */
    /* Do nothing.  Live migration to a remote host with cache.direct=off is
     * unsupported on other host operating systems.  Cache consistency issues
     * may occur but no error is reported here, partly because that's the
     * historical behavior and partly because it's hard to differentiate valid
     * configurations that should not cause errors.
     */
#endif /* !__linux__ */
}

/* Record the outcome of a discard request in the driver's statistics:
 * count failures, and count bytes only for successful requests. */
static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
{
    if (ret) {
        s->stats.discard_nb_failed++;
    } else {
        s->stats.discard_nb_ok++;
        s->stats.discard_bytes_ok += nbytes;
    }
}

/*
 * zone report - Get a zone block device's information in the form
 * of an array of zone descriptors.
 * zones is an array of zone descriptors to hold zone information on reply;
 * offset can be any byte within the entire size of the device;
 * nr_zones is the maximum number of sectors the command should operate on.
 */
#if defined(CONFIG_BLKZONED)
static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset,
                                           unsigned int *nr_zones,
                                           BlockZoneDescriptor *zones) {
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb = (RawPosixAIOData) {
        .bs         = bs,
        .aio_fildes = s->fd,
        .aio_type   = QEMU_AIO_ZONE_REPORT,
        .aio_offset = offset,
        .zone_report    = {
            .nr_zones       = nr_zones,
            .zones          = zones,
        },
    };

    trace_zbd_zone_report(bs, *nr_zones, offset >> BDRV_SECTOR_BITS);
    /* The actual BLKREPORTZONE work happens in the thread pool handler */
    return raw_thread_pool_submit(handle_aiocb_zone_report, &acb);
}
#endif

/*
 * zone management operations - Execute an operation on a zone
 */
#if defined(CONFIG_BLKZONED)
static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
                                         int64_t offset, int64_t len) {
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int64_t zone_size, zone_size_mask;
    const char *op_name;
    unsigned long zo;
    int ret;
    BlockZoneWps *wps = bs->wps;
    int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;

    /* Both offset and len must be aligned to the zone size (a shorter len
     * is tolerated only when the range ends exactly at device capacity,
     * to allow a smaller final zone) */
    zone_size = bs->bl.zone_size;
    zone_size_mask = zone_size - 1;
    if (offset & zone_size_mask) {
        error_report("sector offset %" PRId64 " is not aligned to zone size "
                     "%" PRId64 "", offset / 512, zone_size / 512);
        return -EINVAL;
    }

    if (((offset + len) < capacity && len & zone_size_mask) ||
        offset + len > capacity) {
        error_report("number of sectors %" PRId64 " is not aligned to zone size"
                      " %" PRId64 "", len / 512, zone_size / 512);
        return -EINVAL;
    }

    /* i: index of the first affected zone; wp: its cached write pointer */
    uint32_t i = offset / bs->bl.zone_size;
    uint32_t nrz = len / bs->bl.zone_size;
    uint64_t *wp = &wps->wp[i];
    if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
        error_report("zone mgmt operations are not allowed for conventional zones");
        return -EIO;
    }

    /* Map the generic zone op onto the corresponding Linux blkzoned ioctl */
    switch (op) {
    case BLK_ZO_OPEN:
        op_name = "BLKOPENZONE";
        zo = BLKOPENZONE;
        break;
    case BLK_ZO_CLOSE:
        op_name = "BLKCLOSEZONE";
        zo = BLKCLOSEZONE;
        break;
    case BLK_ZO_FINISH:
        op_name = "BLKFINISHZONE";
        zo = BLKFINISHZONE;
        break;
    case BLK_ZO_RESET:
        op_name = "BLKRESETZONE";
        zo = BLKRESETZONE;
        break;
    default:
        error_report("Unsupported zone op: 0x%x", op);
        return -ENOTSUP;
    }

    acb = (RawPosixAIOData) {
        .bs             = bs,
        .aio_fildes     = s->fd,
        .aio_type       = QEMU_AIO_ZONE_MGMT,
        .aio_offset     = offset,
        .aio_nbytes     = len,
        .zone_mgmt  = {
            .op = zo,
        },
    };

    trace_zbd_zone_mgmt(bs, op_name, offset >> BDRV_SECTOR_BITS,
                        len >> BDRV_SECTOR_BITS);
    ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb);
    if (ret != 0) {
        /* On failure, re-read the affected write pointers from the device
         * so the cache does not go stale */
        update_zones_wp(bs, s->fd, offset, i);
        error_report("ioctl %s failed %d", op_name, ret);
        return ret;
    }

    /* Keep the cached write pointers consistent with the operation:
     * full-device reset re-reads everything; partial reset rewinds each
     * zone's wp to its start; finish advances each wp to the zone end */
    if (zo == BLKRESETZONE && len == capacity) {
        ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 1);
        if (ret < 0) {
            error_report("reporting single wp failed");
            return ret;
        }
    } else if (zo == BLKRESETZONE) {
        for (unsigned int j = 0; j < nrz; ++j) {
            wp[j] = offset + j * zone_size;
        }
    } else if (zo == BLKFINISHZONE) {
        for (unsigned int j = 0; j < nrz; ++j) {
            /* The zoned device allows the last zone smaller than the
             * zone size. */
            wp[j] = MIN(offset + (j + 1) * zone_size, offset + len);
        }
    }

    return ret;
}
#endif

#if defined(CONFIG_BLKZONED)
/* Zone append: write @qiov at the zone starting at *offset; on completion
 * the write position is returned through s->offset / *offset. */
static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
                                           int64_t *offset,
                                           QEMUIOVector *qiov,
                                           BdrvRequestFlags flags) {
    assert(flags == 0);
    int64_t zone_size_mask = bs->bl.zone_size - 1;
    int64_t iov_len = 0;
    int64_t len = 0;
    BDRVRawState *s = bs->opaque;
    s->offset = offset;

    if (*offset & zone_size_mask) {
        error_report("sector offset %" PRId64 " is not aligned to zone size "
                     "%" PRId32 "", *offset / 512, bs->bl.zone_size / 512);
        return -EINVAL;
    }

    /* Every iovec element must be a multiple of the write granularity */
    int64_t wg = bs->bl.write_granularity;
    int64_t wg_mask = wg - 1;
    for (int i = 0; i < qiov->niov; i++) {
        iov_len = qiov->iov[i].iov_len;
        if (iov_len & wg_mask) {
            error_report("len of IOVector[%d] %" PRId64 " is not aligned to "
                         "block size %" PRId64 "", i, iov_len, wg);
            return -EINVAL;
        }
        len += iov_len;
    }

    trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS);
    return raw_co_prw(bs, *offset, len, qiov, QEMU_AIO_ZONE_APPEND);
}
#endif

/* Common discard path for both regular files and (when @blkdev) host
 * block devices; runs the discard in the thread pool and accounts it. */
static coroutine_fn int
raw_do_pdiscard(BlockDriverState *bs,
int64_t offset, int64_t bytes, 35390c802287SVladimir Sementsov-Ogievskiy bool blkdev) 3540c1bb86cdSEric Blake { 3541c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 354246ee0f46SKevin Wolf RawPosixAIOData acb; 35431c450366SAnton Nefedov int ret; 3544c1bb86cdSEric Blake 354546ee0f46SKevin Wolf acb = (RawPosixAIOData) { 354646ee0f46SKevin Wolf .bs = bs, 354746ee0f46SKevin Wolf .aio_fildes = s->fd, 354846ee0f46SKevin Wolf .aio_type = QEMU_AIO_DISCARD, 354946ee0f46SKevin Wolf .aio_offset = offset, 355046ee0f46SKevin Wolf .aio_nbytes = bytes, 355146ee0f46SKevin Wolf }; 355246ee0f46SKevin Wolf 355346ee0f46SKevin Wolf if (blkdev) { 355446ee0f46SKevin Wolf acb.aio_type |= QEMU_AIO_BLKDEV; 355546ee0f46SKevin Wolf } 355646ee0f46SKevin Wolf 35570fdb7311SEmanuele Giuseppe Esposito ret = raw_thread_pool_submit(handle_aiocb_discard, &acb); 35581c450366SAnton Nefedov raw_account_discard(s, bytes, ret); 35591c450366SAnton Nefedov return ret; 356046ee0f46SKevin Wolf } 356146ee0f46SKevin Wolf 356246ee0f46SKevin Wolf static coroutine_fn int 35630c802287SVladimir Sementsov-Ogievskiy raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) 356446ee0f46SKevin Wolf { 356546ee0f46SKevin Wolf return raw_do_pdiscard(bs, offset, bytes, false); 3566c1bb86cdSEric Blake } 3567c1bb86cdSEric Blake 35687154d8aeSKevin Wolf static int coroutine_fn 3569f34b2bcfSVladimir Sementsov-Ogievskiy raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, 35707154d8aeSKevin Wolf BdrvRequestFlags flags, bool blkdev) 35717154d8aeSKevin Wolf { 35727154d8aeSKevin Wolf BDRVRawState *s = bs->opaque; 35737154d8aeSKevin Wolf RawPosixAIOData acb; 35747154d8aeSKevin Wolf ThreadPoolFunc *handler; 35757154d8aeSKevin Wolf 3576292d06b9SMax Reitz #ifdef CONFIG_FALLOCATE 3577292d06b9SMax Reitz if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 3578292d06b9SMax Reitz BdrvTrackedRequest *req; 3579292d06b9SMax Reitz 3580292d06b9SMax Reitz /* 3581292d06b9SMax Reitz * This is a workaround 
for a bug in the Linux XFS driver, 3582292d06b9SMax Reitz * where writes submitted through the AIO interface will be 3583292d06b9SMax Reitz * discarded if they happen beyond a concurrently running 3584292d06b9SMax Reitz * fallocate() that increases the file length (i.e., both the 3585292d06b9SMax Reitz * write and the fallocate() happen beyond the EOF). 3586292d06b9SMax Reitz * 3587292d06b9SMax Reitz * To work around it, we extend the tracked request for this 3588292d06b9SMax Reitz * zero write until INT64_MAX (effectively infinity), and mark 3589292d06b9SMax Reitz * it as serializing. 3590292d06b9SMax Reitz * 3591292d06b9SMax Reitz * We have to enable this workaround for all filesystems and 3592292d06b9SMax Reitz * AIO modes (not just XFS with aio=native), because for 3593292d06b9SMax Reitz * remote filesystems we do not know the host configuration. 3594292d06b9SMax Reitz */ 3595292d06b9SMax Reitz 3596292d06b9SMax Reitz req = bdrv_co_get_self_request(bs); 3597292d06b9SMax Reitz assert(req); 3598292d06b9SMax Reitz assert(req->type == BDRV_TRACKED_WRITE); 3599292d06b9SMax Reitz assert(req->offset <= offset); 3600292d06b9SMax Reitz assert(req->offset + req->bytes >= offset + bytes); 3601292d06b9SMax Reitz 36028b117001SVladimir Sementsov-Ogievskiy req->bytes = BDRV_MAX_LENGTH - req->offset; 36038b117001SVladimir Sementsov-Ogievskiy 360469b55e03SVladimir Sementsov-Ogievskiy bdrv_check_request(req->offset, req->bytes, &error_abort); 3605292d06b9SMax Reitz 36068ac5aab2SVladimir Sementsov-Ogievskiy bdrv_make_request_serialising(req, bs->bl.request_alignment); 3607292d06b9SMax Reitz } 3608292d06b9SMax Reitz #endif 3609292d06b9SMax Reitz 36107154d8aeSKevin Wolf acb = (RawPosixAIOData) { 36117154d8aeSKevin Wolf .bs = bs, 36127154d8aeSKevin Wolf .aio_fildes = s->fd, 36137154d8aeSKevin Wolf .aio_type = QEMU_AIO_WRITE_ZEROES, 36147154d8aeSKevin Wolf .aio_offset = offset, 36157154d8aeSKevin Wolf .aio_nbytes = bytes, 36167154d8aeSKevin Wolf }; 36177154d8aeSKevin Wolf 
36187154d8aeSKevin Wolf if (blkdev) { 36197154d8aeSKevin Wolf acb.aio_type |= QEMU_AIO_BLKDEV; 36207154d8aeSKevin Wolf } 3621738301e1SKevin Wolf if (flags & BDRV_REQ_NO_FALLBACK) { 3622738301e1SKevin Wolf acb.aio_type |= QEMU_AIO_NO_FALLBACK; 3623738301e1SKevin Wolf } 36247154d8aeSKevin Wolf 36257154d8aeSKevin Wolf if (flags & BDRV_REQ_MAY_UNMAP) { 36267154d8aeSKevin Wolf acb.aio_type |= QEMU_AIO_DISCARD; 36277154d8aeSKevin Wolf handler = handle_aiocb_write_zeroes_unmap; 36287154d8aeSKevin Wolf } else { 36297154d8aeSKevin Wolf handler = handle_aiocb_write_zeroes; 36307154d8aeSKevin Wolf } 36317154d8aeSKevin Wolf 36320fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handler, &acb); 36337154d8aeSKevin Wolf } 36347154d8aeSKevin Wolf 3635c1bb86cdSEric Blake static int coroutine_fn raw_co_pwrite_zeroes( 3636c1bb86cdSEric Blake BlockDriverState *bs, int64_t offset, 3637f34b2bcfSVladimir Sementsov-Ogievskiy int64_t bytes, BdrvRequestFlags flags) 3638c1bb86cdSEric Blake { 36397154d8aeSKevin Wolf return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false); 3640c1bb86cdSEric Blake } 3641c1bb86cdSEric Blake 36423d47eb0aSEmanuele Giuseppe Esposito static int coroutine_fn 36433d47eb0aSEmanuele Giuseppe Esposito raw_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 3644c1bb86cdSEric Blake { 3645c1bb86cdSEric Blake return 0; 3646c1bb86cdSEric Blake } 3647c1bb86cdSEric Blake 36487f36a50aSHanna Reitz static ImageInfoSpecific *raw_get_specific_info(BlockDriverState *bs, 36497f36a50aSHanna Reitz Error **errp) 36507f36a50aSHanna Reitz { 36517f36a50aSHanna Reitz ImageInfoSpecificFile *file_info = g_new0(ImageInfoSpecificFile, 1); 36527f36a50aSHanna Reitz ImageInfoSpecific *spec_info = g_new(ImageInfoSpecific, 1); 36537f36a50aSHanna Reitz 36547f36a50aSHanna Reitz *spec_info = (ImageInfoSpecific){ 36557f36a50aSHanna Reitz .type = IMAGE_INFO_SPECIFIC_KIND_FILE, 36567f36a50aSHanna Reitz .u.file.data = file_info, 36577f36a50aSHanna Reitz }; 36587f36a50aSHanna Reitz 
36597f36a50aSHanna Reitz #ifdef FS_IOC_FSGETXATTR 36607f36a50aSHanna Reitz { 36617f36a50aSHanna Reitz BDRVRawState *s = bs->opaque; 36627f36a50aSHanna Reitz struct fsxattr attr; 36637f36a50aSHanna Reitz int ret; 36647f36a50aSHanna Reitz 36657f36a50aSHanna Reitz ret = ioctl(s->fd, FS_IOC_FSGETXATTR, &attr); 36667f36a50aSHanna Reitz if (!ret && attr.fsx_extsize != 0) { 36677f36a50aSHanna Reitz file_info->has_extent_size_hint = true; 36687f36a50aSHanna Reitz file_info->extent_size_hint = attr.fsx_extsize; 36697f36a50aSHanna Reitz } 36707f36a50aSHanna Reitz } 36717f36a50aSHanna Reitz #endif 36727f36a50aSHanna Reitz 36737f36a50aSHanna Reitz return spec_info; 36747f36a50aSHanna Reitz } 36757f36a50aSHanna Reitz 3676d9245599SAnton Nefedov static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs) 3677d9245599SAnton Nefedov { 3678d9245599SAnton Nefedov BDRVRawState *s = bs->opaque; 3679d9245599SAnton Nefedov return (BlockStatsSpecificFile) { 3680d9245599SAnton Nefedov .discard_nb_ok = s->stats.discard_nb_ok, 3681d9245599SAnton Nefedov .discard_nb_failed = s->stats.discard_nb_failed, 3682d9245599SAnton Nefedov .discard_bytes_ok = s->stats.discard_bytes_ok, 3683d9245599SAnton Nefedov }; 3684d9245599SAnton Nefedov } 3685d9245599SAnton Nefedov 3686d9245599SAnton Nefedov static BlockStatsSpecific *raw_get_specific_stats(BlockDriverState *bs) 3687d9245599SAnton Nefedov { 3688d9245599SAnton Nefedov BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1); 3689d9245599SAnton Nefedov 3690d9245599SAnton Nefedov stats->driver = BLOCKDEV_DRIVER_FILE; 3691d9245599SAnton Nefedov stats->u.file = get_blockstats_specific_file(bs); 3692d9245599SAnton Nefedov 3693d9245599SAnton Nefedov return stats; 3694d9245599SAnton Nefedov } 3695d9245599SAnton Nefedov 369614176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE) 3697d9245599SAnton Nefedov static BlockStatsSpecific *hdev_get_specific_stats(BlockDriverState *bs) 3698d9245599SAnton Nefedov { 3699d9245599SAnton Nefedov 
BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1); 3700d9245599SAnton Nefedov 3701d9245599SAnton Nefedov stats->driver = BLOCKDEV_DRIVER_HOST_DEVICE; 3702d9245599SAnton Nefedov stats->u.host_device = get_blockstats_specific_file(bs); 3703d9245599SAnton Nefedov 3704d9245599SAnton Nefedov return stats; 3705d9245599SAnton Nefedov } 370614176c8dSJoelle van Dyne #endif /* HAVE_HOST_BLOCK_DEVICE */ 3707d9245599SAnton Nefedov 3708c1bb86cdSEric Blake static QemuOptsList raw_create_opts = { 3709c1bb86cdSEric Blake .name = "raw-create-opts", 3710c1bb86cdSEric Blake .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head), 3711c1bb86cdSEric Blake .desc = { 3712c1bb86cdSEric Blake { 3713c1bb86cdSEric Blake .name = BLOCK_OPT_SIZE, 3714c1bb86cdSEric Blake .type = QEMU_OPT_SIZE, 3715c1bb86cdSEric Blake .help = "Virtual disk size" 3716c1bb86cdSEric Blake }, 3717c1bb86cdSEric Blake { 3718c1bb86cdSEric Blake .name = BLOCK_OPT_NOCOW, 3719c1bb86cdSEric Blake .type = QEMU_OPT_BOOL, 3720c1bb86cdSEric Blake .help = "Turn off copy-on-write (valid only on btrfs)" 3721c1bb86cdSEric Blake }, 3722c1bb86cdSEric Blake { 3723c1bb86cdSEric Blake .name = BLOCK_OPT_PREALLOC, 3724c1bb86cdSEric Blake .type = QEMU_OPT_STRING, 3725abea0053SStefano Garzarella .help = "Preallocation mode (allowed values: off" 3726abea0053SStefano Garzarella #ifdef CONFIG_POSIX_FALLOCATE 3727abea0053SStefano Garzarella ", falloc" 3728abea0053SStefano Garzarella #endif 3729abea0053SStefano Garzarella ", full)" 3730c1bb86cdSEric Blake }, 3731ffa244c8SKevin Wolf { 3732ffa244c8SKevin Wolf .name = BLOCK_OPT_EXTENT_SIZE_HINT, 3733ffa244c8SKevin Wolf .type = QEMU_OPT_SIZE, 3734ffa244c8SKevin Wolf .help = "Extent size hint for the image file, 0 to disable" 3735ffa244c8SKevin Wolf }, 3736c1bb86cdSEric Blake { /* end of list */ } 3737c1bb86cdSEric Blake } 3738c1bb86cdSEric Blake }; 3739c1bb86cdSEric Blake 3740244a5668SFam Zheng static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared, 3741244a5668SFam 
Zheng Error **errp) 3742244a5668SFam Zheng { 37436ceabe6fSKevin Wolf BDRVRawState *s = bs->opaque; 374472373e40SVladimir Sementsov-Ogievskiy int input_flags = s->reopen_state ? s->reopen_state->flags : bs->open_flags; 37456ceabe6fSKevin Wolf int open_flags; 37466ceabe6fSKevin Wolf int ret; 37476ceabe6fSKevin Wolf 37486ceabe6fSKevin Wolf /* We may need a new fd if auto-read-only switches the mode */ 374972373e40SVladimir Sementsov-Ogievskiy ret = raw_reconfigure_getfd(bs, input_flags, &open_flags, perm, 37506ceabe6fSKevin Wolf false, errp); 37516ceabe6fSKevin Wolf if (ret < 0) { 37526ceabe6fSKevin Wolf return ret; 37536ceabe6fSKevin Wolf } else if (ret != s->fd) { 375472373e40SVladimir Sementsov-Ogievskiy Error *local_err = NULL; 375572373e40SVladimir Sementsov-Ogievskiy 375672373e40SVladimir Sementsov-Ogievskiy /* 375772373e40SVladimir Sementsov-Ogievskiy * Fail already check_perm() if we can't get a working O_DIRECT 375872373e40SVladimir Sementsov-Ogievskiy * alignment with the new fd. 375972373e40SVladimir Sementsov-Ogievskiy */ 376072373e40SVladimir Sementsov-Ogievskiy raw_probe_alignment(bs, ret, &local_err); 376172373e40SVladimir Sementsov-Ogievskiy if (local_err) { 376272373e40SVladimir Sementsov-Ogievskiy error_propagate(errp, local_err); 376372373e40SVladimir Sementsov-Ogievskiy return -EINVAL; 376472373e40SVladimir Sementsov-Ogievskiy } 376572373e40SVladimir Sementsov-Ogievskiy 37666ceabe6fSKevin Wolf s->perm_change_fd = ret; 3767094e3639SMax Reitz s->perm_change_flags = open_flags; 37686ceabe6fSKevin Wolf } 37696ceabe6fSKevin Wolf 37706ceabe6fSKevin Wolf /* Prepare permissions on old fd to avoid conflicts between old and new, 37716ceabe6fSKevin Wolf * but keep everything locked that new will need. 
*/ 37726ceabe6fSKevin Wolf ret = raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, errp); 37736ceabe6fSKevin Wolf if (ret < 0) { 37746ceabe6fSKevin Wolf goto fail; 37756ceabe6fSKevin Wolf } 37766ceabe6fSKevin Wolf 37776ceabe6fSKevin Wolf /* Copy locks to the new fd */ 3778eb43ea16SLi Feng if (s->perm_change_fd && s->use_lock) { 37796ceabe6fSKevin Wolf ret = raw_apply_lock_bytes(NULL, s->perm_change_fd, perm, ~shared, 37806ceabe6fSKevin Wolf false, errp); 37816ceabe6fSKevin Wolf if (ret < 0) { 37826ceabe6fSKevin Wolf raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL); 37836ceabe6fSKevin Wolf goto fail; 37846ceabe6fSKevin Wolf } 37856ceabe6fSKevin Wolf } 37866ceabe6fSKevin Wolf return 0; 37876ceabe6fSKevin Wolf 37886ceabe6fSKevin Wolf fail: 378972373e40SVladimir Sementsov-Ogievskiy if (s->perm_change_fd) { 37906ceabe6fSKevin Wolf qemu_close(s->perm_change_fd); 37916ceabe6fSKevin Wolf } 37926ceabe6fSKevin Wolf s->perm_change_fd = 0; 37936ceabe6fSKevin Wolf return ret; 3794244a5668SFam Zheng } 3795244a5668SFam Zheng 3796244a5668SFam Zheng static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared) 3797244a5668SFam Zheng { 3798244a5668SFam Zheng BDRVRawState *s = bs->opaque; 37996ceabe6fSKevin Wolf 38006ceabe6fSKevin Wolf /* For reopen, we have already switched to the new fd (.bdrv_set_perm is 38016ceabe6fSKevin Wolf * called after .bdrv_reopen_commit) */ 38026ceabe6fSKevin Wolf if (s->perm_change_fd && s->fd != s->perm_change_fd) { 38036ceabe6fSKevin Wolf qemu_close(s->fd); 38046ceabe6fSKevin Wolf s->fd = s->perm_change_fd; 3805094e3639SMax Reitz s->open_flags = s->perm_change_flags; 38066ceabe6fSKevin Wolf } 38076ceabe6fSKevin Wolf s->perm_change_fd = 0; 38086ceabe6fSKevin Wolf 3809244a5668SFam Zheng raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL); 3810244a5668SFam Zheng s->perm = perm; 3811244a5668SFam Zheng s->shared_perm = shared; 3812244a5668SFam Zheng } 3813244a5668SFam Zheng 3814244a5668SFam Zheng static void 
raw_abort_perm_update(BlockDriverState *bs) 3815244a5668SFam Zheng { 38166ceabe6fSKevin Wolf BDRVRawState *s = bs->opaque; 38176ceabe6fSKevin Wolf 38186ceabe6fSKevin Wolf /* For reopen, .bdrv_reopen_abort is called afterwards and will close 38196ceabe6fSKevin Wolf * the file descriptor. */ 382072373e40SVladimir Sementsov-Ogievskiy if (s->perm_change_fd) { 38216ceabe6fSKevin Wolf qemu_close(s->perm_change_fd); 38226ceabe6fSKevin Wolf } 38236ceabe6fSKevin Wolf s->perm_change_fd = 0; 38246ceabe6fSKevin Wolf 3825244a5668SFam Zheng raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL); 3826244a5668SFam Zheng } 3827244a5668SFam Zheng 3828742bf09bSEmanuele Giuseppe Esposito static int coroutine_fn GRAPH_RDLOCK raw_co_copy_range_from( 382948535049SVladimir Sementsov-Ogievskiy BlockDriverState *bs, BdrvChild *src, int64_t src_offset, 383048535049SVladimir Sementsov-Ogievskiy BdrvChild *dst, int64_t dst_offset, int64_t bytes, 383167b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) 38321efad060SFam Zheng { 383367b51fb9SVladimir Sementsov-Ogievskiy return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, 383467b51fb9SVladimir Sementsov-Ogievskiy read_flags, write_flags); 38351efad060SFam Zheng } 38361efad060SFam Zheng 3837742bf09bSEmanuele Giuseppe Esposito static int coroutine_fn GRAPH_RDLOCK 3838742bf09bSEmanuele Giuseppe Esposito raw_co_copy_range_to(BlockDriverState *bs, 3839742bf09bSEmanuele Giuseppe Esposito BdrvChild *src, int64_t src_offset, 3840742bf09bSEmanuele Giuseppe Esposito BdrvChild *dst, int64_t dst_offset, 3841742bf09bSEmanuele Giuseppe Esposito int64_t bytes, BdrvRequestFlags read_flags, 384267b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags write_flags) 38431efad060SFam Zheng { 384458a209c4SKevin Wolf RawPosixAIOData acb; 38451efad060SFam Zheng BDRVRawState *s = bs->opaque; 38461efad060SFam Zheng BDRVRawState *src_s; 38471efad060SFam Zheng 38481efad060SFam Zheng assert(dst->bs == bs); 
38491efad060SFam Zheng if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) { 38501efad060SFam Zheng return -ENOTSUP; 38511efad060SFam Zheng } 38521efad060SFam Zheng 38531efad060SFam Zheng src_s = src->bs->opaque; 38549f850f67SFam Zheng if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) { 38551efad060SFam Zheng return -EIO; 38561efad060SFam Zheng } 385758a209c4SKevin Wolf 385858a209c4SKevin Wolf acb = (RawPosixAIOData) { 385958a209c4SKevin Wolf .bs = bs, 386058a209c4SKevin Wolf .aio_type = QEMU_AIO_COPY_RANGE, 386158a209c4SKevin Wolf .aio_fildes = src_s->fd, 386258a209c4SKevin Wolf .aio_offset = src_offset, 386358a209c4SKevin Wolf .aio_nbytes = bytes, 386458a209c4SKevin Wolf .copy_range = { 386558a209c4SKevin Wolf .aio_fd2 = s->fd, 386658a209c4SKevin Wolf .aio_offset2 = dst_offset, 386758a209c4SKevin Wolf }, 386858a209c4SKevin Wolf }; 386958a209c4SKevin Wolf 38700fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_copy_range, &acb); 38711efad060SFam Zheng } 38721efad060SFam Zheng 3873c1bb86cdSEric Blake BlockDriver bdrv_file = { 3874c1bb86cdSEric Blake .format_name = "file", 3875c1bb86cdSEric Blake .protocol_name = "file", 3876c1bb86cdSEric Blake .instance_size = sizeof(BDRVRawState), 3877c1bb86cdSEric Blake .bdrv_needs_filename = true, 3878c1bb86cdSEric Blake .bdrv_probe = NULL, /* no probe for protocols */ 3879c1bb86cdSEric Blake .bdrv_parse_filename = raw_parse_filename, 3880c1bb86cdSEric Blake .bdrv_file_open = raw_open, 3881c1bb86cdSEric Blake .bdrv_reopen_prepare = raw_reopen_prepare, 3882c1bb86cdSEric Blake .bdrv_reopen_commit = raw_reopen_commit, 3883c1bb86cdSEric Blake .bdrv_reopen_abort = raw_reopen_abort, 3884c1bb86cdSEric Blake .bdrv_close = raw_close, 3885927f11e1SKevin Wolf .bdrv_co_create = raw_co_create, 3886efc75e2aSStefan Hajnoczi .bdrv_co_create_opts = raw_co_create_opts, 3887c1bb86cdSEric Blake .bdrv_has_zero_init = bdrv_has_zero_init_1, 3888a290f085SEric Blake .bdrv_co_block_status = raw_co_block_status, 
3889dd577a26SStefan Hajnoczi .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 3890c1bb86cdSEric Blake .bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes, 38919bffae14SDaniel Henrique Barboza .bdrv_co_delete_file = raw_co_delete_file, 3892c1bb86cdSEric Blake 3893c1bb86cdSEric Blake .bdrv_co_preadv = raw_co_preadv, 3894c1bb86cdSEric Blake .bdrv_co_pwritev = raw_co_pwritev, 389533d70fb6SKevin Wolf .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 389633d70fb6SKevin Wolf .bdrv_co_pdiscard = raw_co_pdiscard, 38971efad060SFam Zheng .bdrv_co_copy_range_from = raw_co_copy_range_from, 38981efad060SFam Zheng .bdrv_co_copy_range_to = raw_co_copy_range_to, 3899c1bb86cdSEric Blake .bdrv_refresh_limits = raw_refresh_limits, 3900ed6e2161SNishanth Aravamudan .bdrv_attach_aio_context = raw_aio_attach_aio_context, 3901c1bb86cdSEric Blake 3902061ca8a3SKevin Wolf .bdrv_co_truncate = raw_co_truncate, 3903c86422c5SEmanuele Giuseppe Esposito .bdrv_co_getlength = raw_co_getlength, 39043d47eb0aSEmanuele Giuseppe Esposito .bdrv_co_get_info = raw_co_get_info, 39057f36a50aSHanna Reitz .bdrv_get_specific_info = raw_get_specific_info, 390682618d7bSEmanuele Giuseppe Esposito .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size, 3907d9245599SAnton Nefedov .bdrv_get_specific_stats = raw_get_specific_stats, 3908244a5668SFam Zheng .bdrv_check_perm = raw_check_perm, 3909244a5668SFam Zheng .bdrv_set_perm = raw_set_perm, 3910244a5668SFam Zheng .bdrv_abort_perm_update = raw_abort_perm_update, 3911c1bb86cdSEric Blake .create_opts = &raw_create_opts, 39128a2ce0bcSAlberto Garcia .mutable_opts = mutable_opts, 3913c1bb86cdSEric Blake }; 3914c1bb86cdSEric Blake 3915c1bb86cdSEric Blake /***********************************************/ 3916c1bb86cdSEric Blake /* host device */ 3917c1bb86cdSEric Blake 391814176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE) 391914176c8dSJoelle van Dyne 3920c1bb86cdSEric Blake #if defined(__APPLE__) && defined(__MACH__) 3921c1bb86cdSEric Blake static 
kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath, 3922c1bb86cdSEric Blake CFIndex maxPathSize, int flags); 3923aa44d3f6SPhilippe Mathieu-Daudé 3924aa44d3f6SPhilippe Mathieu-Daudé #if !defined(MAC_OS_VERSION_12_0) \ 3925aa44d3f6SPhilippe Mathieu-Daudé || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0) 3926aa44d3f6SPhilippe Mathieu-Daudé #define IOMainPort IOMasterPort 3927aa44d3f6SPhilippe Mathieu-Daudé #endif 3928aa44d3f6SPhilippe Mathieu-Daudé 3929c1bb86cdSEric Blake static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator) 3930c1bb86cdSEric Blake { 3931c1bb86cdSEric Blake kern_return_t kernResult = KERN_FAILURE; 3932aa44d3f6SPhilippe Mathieu-Daudé mach_port_t mainPort; 3933c1bb86cdSEric Blake CFMutableDictionaryRef classesToMatch; 3934c1bb86cdSEric Blake const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass}; 3935c1bb86cdSEric Blake char *mediaType = NULL; 3936c1bb86cdSEric Blake 3937aa44d3f6SPhilippe Mathieu-Daudé kernResult = IOMainPort(MACH_PORT_NULL, &mainPort); 3938c1bb86cdSEric Blake if ( KERN_SUCCESS != kernResult ) { 3939aa44d3f6SPhilippe Mathieu-Daudé printf("IOMainPort returned %d\n", kernResult); 3940c1bb86cdSEric Blake } 3941c1bb86cdSEric Blake 3942c1bb86cdSEric Blake int index; 3943c1bb86cdSEric Blake for (index = 0; index < ARRAY_SIZE(matching_array); index++) { 3944c1bb86cdSEric Blake classesToMatch = IOServiceMatching(matching_array[index]); 3945c1bb86cdSEric Blake if (classesToMatch == NULL) { 3946c1bb86cdSEric Blake error_report("IOServiceMatching returned NULL for %s", 3947c1bb86cdSEric Blake matching_array[index]); 3948c1bb86cdSEric Blake continue; 3949c1bb86cdSEric Blake } 3950c1bb86cdSEric Blake CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey), 3951c1bb86cdSEric Blake kCFBooleanTrue); 3952aa44d3f6SPhilippe Mathieu-Daudé kernResult = IOServiceGetMatchingServices(mainPort, classesToMatch, 3953c1bb86cdSEric Blake mediaIterator); 3954c1bb86cdSEric Blake if (kernResult != 
KERN_SUCCESS) { 3955c1bb86cdSEric Blake error_report("Note: IOServiceGetMatchingServices returned %d", 3956c1bb86cdSEric Blake kernResult); 3957c1bb86cdSEric Blake continue; 3958c1bb86cdSEric Blake } 3959c1bb86cdSEric Blake 3960c1bb86cdSEric Blake /* If a match was found, leave the loop */ 3961c1bb86cdSEric Blake if (*mediaIterator != 0) { 39624f7d28d7SLaurent Vivier trace_file_FindEjectableOpticalMedia(matching_array[index]); 3963c1bb86cdSEric Blake mediaType = g_strdup(matching_array[index]); 3964c1bb86cdSEric Blake break; 3965c1bb86cdSEric Blake } 3966c1bb86cdSEric Blake } 3967c1bb86cdSEric Blake return mediaType; 3968c1bb86cdSEric Blake } 3969c1bb86cdSEric Blake 3970c1bb86cdSEric Blake kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath, 3971c1bb86cdSEric Blake CFIndex maxPathSize, int flags) 3972c1bb86cdSEric Blake { 3973c1bb86cdSEric Blake io_object_t nextMedia; 3974c1bb86cdSEric Blake kern_return_t kernResult = KERN_FAILURE; 3975c1bb86cdSEric Blake *bsdPath = '\0'; 3976c1bb86cdSEric Blake nextMedia = IOIteratorNext( mediaIterator ); 3977c1bb86cdSEric Blake if ( nextMedia ) 3978c1bb86cdSEric Blake { 3979c1bb86cdSEric Blake CFTypeRef bsdPathAsCFString; 3980c1bb86cdSEric Blake bsdPathAsCFString = IORegistryEntryCreateCFProperty( nextMedia, CFSTR( kIOBSDNameKey ), kCFAllocatorDefault, 0 ); 3981c1bb86cdSEric Blake if ( bsdPathAsCFString ) { 3982c1bb86cdSEric Blake size_t devPathLength; 3983c1bb86cdSEric Blake strcpy( bsdPath, _PATH_DEV ); 3984c1bb86cdSEric Blake if (flags & BDRV_O_NOCACHE) { 3985c1bb86cdSEric Blake strcat(bsdPath, "r"); 3986c1bb86cdSEric Blake } 3987c1bb86cdSEric Blake devPathLength = strlen( bsdPath ); 3988c1bb86cdSEric Blake if ( CFStringGetCString( bsdPathAsCFString, bsdPath + devPathLength, maxPathSize - devPathLength, kCFStringEncodingASCII ) ) { 3989c1bb86cdSEric Blake kernResult = KERN_SUCCESS; 3990c1bb86cdSEric Blake } 3991c1bb86cdSEric Blake CFRelease( bsdPathAsCFString ); 3992c1bb86cdSEric Blake } 3993c1bb86cdSEric Blake 
IOObjectRelease( nextMedia ); 3994c1bb86cdSEric Blake } 3995c1bb86cdSEric Blake 3996c1bb86cdSEric Blake return kernResult; 3997c1bb86cdSEric Blake } 3998c1bb86cdSEric Blake 3999c1bb86cdSEric Blake /* Sets up a real cdrom for use in QEMU */ 4000c1bb86cdSEric Blake static bool setup_cdrom(char *bsd_path, Error **errp) 4001c1bb86cdSEric Blake { 4002c1bb86cdSEric Blake int index, num_of_test_partitions = 2, fd; 4003c1bb86cdSEric Blake char test_partition[MAXPATHLEN]; 4004c1bb86cdSEric Blake bool partition_found = false; 4005c1bb86cdSEric Blake 4006c1bb86cdSEric Blake /* look for a working partition */ 4007c1bb86cdSEric Blake for (index = 0; index < num_of_test_partitions; index++) { 4008c1bb86cdSEric Blake snprintf(test_partition, sizeof(test_partition), "%ss%d", bsd_path, 4009c1bb86cdSEric Blake index); 4010b18a24a9SDaniel P. Berrangé fd = qemu_open(test_partition, O_RDONLY | O_BINARY | O_LARGEFILE, NULL); 4011c1bb86cdSEric Blake if (fd >= 0) { 4012c1bb86cdSEric Blake partition_found = true; 4013c1bb86cdSEric Blake qemu_close(fd); 4014c1bb86cdSEric Blake break; 4015c1bb86cdSEric Blake } 4016c1bb86cdSEric Blake } 4017c1bb86cdSEric Blake 4018c1bb86cdSEric Blake /* if a working partition on the device was not found */ 4019c1bb86cdSEric Blake if (partition_found == false) { 4020c1bb86cdSEric Blake error_setg(errp, "Failed to find a working partition on disc"); 4021c1bb86cdSEric Blake } else { 40224f7d28d7SLaurent Vivier trace_file_setup_cdrom(test_partition); 4023c1bb86cdSEric Blake pstrcpy(bsd_path, MAXPATHLEN, test_partition); 4024c1bb86cdSEric Blake } 4025c1bb86cdSEric Blake return partition_found; 4026c1bb86cdSEric Blake } 4027c1bb86cdSEric Blake 4028c1bb86cdSEric Blake /* Prints directions on mounting and unmounting a device */ 4029c1bb86cdSEric Blake static void print_unmounting_directions(const char *file_name) 4030c1bb86cdSEric Blake { 4031c1bb86cdSEric Blake error_report("If device %s is mounted on the desktop, unmount" 4032c1bb86cdSEric Blake " it first before 
using it in QEMU", file_name); 4033c1bb86cdSEric Blake error_report("Command to unmount device: diskutil unmountDisk %s", 4034c1bb86cdSEric Blake file_name); 4035c1bb86cdSEric Blake error_report("Command to mount device: diskutil mountDisk %s", file_name); 4036c1bb86cdSEric Blake } 4037c1bb86cdSEric Blake 4038c1bb86cdSEric Blake #endif /* defined(__APPLE__) && defined(__MACH__) */ 4039c1bb86cdSEric Blake 4040c1bb86cdSEric Blake static int hdev_probe_device(const char *filename) 4041c1bb86cdSEric Blake { 4042c1bb86cdSEric Blake struct stat st; 4043c1bb86cdSEric Blake 4044c1bb86cdSEric Blake /* allow a dedicated CD-ROM driver to match with a higher priority */ 4045c1bb86cdSEric Blake if (strstart(filename, "/dev/cdrom", NULL)) 4046c1bb86cdSEric Blake return 50; 4047c1bb86cdSEric Blake 4048c1bb86cdSEric Blake if (stat(filename, &st) >= 0 && 4049c1bb86cdSEric Blake (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) { 4050c1bb86cdSEric Blake return 100; 4051c1bb86cdSEric Blake } 4052c1bb86cdSEric Blake 4053c1bb86cdSEric Blake return 0; 4054c1bb86cdSEric Blake } 4055c1bb86cdSEric Blake 4056c1bb86cdSEric Blake static void hdev_parse_filename(const char *filename, QDict *options, 4057c1bb86cdSEric Blake Error **errp) 4058c1bb86cdSEric Blake { 405903c320d8SMax Reitz bdrv_parse_filename_strip_prefix(filename, "host_device:", options); 4060c1bb86cdSEric Blake } 4061c1bb86cdSEric Blake 4062c1bb86cdSEric Blake static bool hdev_is_sg(BlockDriverState *bs) 4063c1bb86cdSEric Blake { 4064c1bb86cdSEric Blake 4065c1bb86cdSEric Blake #if defined(__linux__) 4066c1bb86cdSEric Blake 4067c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4068c1bb86cdSEric Blake struct stat st; 4069c1bb86cdSEric Blake struct sg_scsi_id scsiid; 4070c1bb86cdSEric Blake int sg_version; 4071c1bb86cdSEric Blake int ret; 4072c1bb86cdSEric Blake 4073c1bb86cdSEric Blake if (stat(bs->filename, &st) < 0 || !S_ISCHR(st.st_mode)) { 4074c1bb86cdSEric Blake return false; 4075c1bb86cdSEric Blake } 4076c1bb86cdSEric Blake 
4077c1bb86cdSEric Blake ret = ioctl(s->fd, SG_GET_VERSION_NUM, &sg_version); 4078c1bb86cdSEric Blake if (ret < 0) { 4079c1bb86cdSEric Blake return false; 4080c1bb86cdSEric Blake } 4081c1bb86cdSEric Blake 4082c1bb86cdSEric Blake ret = ioctl(s->fd, SG_GET_SCSI_ID, &scsiid); 4083c1bb86cdSEric Blake if (ret >= 0) { 40844f7d28d7SLaurent Vivier trace_file_hdev_is_sg(scsiid.scsi_type, sg_version); 4085c1bb86cdSEric Blake return true; 4086c1bb86cdSEric Blake } 4087c1bb86cdSEric Blake 4088c1bb86cdSEric Blake #endif 4089c1bb86cdSEric Blake 4090c1bb86cdSEric Blake return false; 4091c1bb86cdSEric Blake } 4092c1bb86cdSEric Blake 4093c1bb86cdSEric Blake static int hdev_open(BlockDriverState *bs, QDict *options, int flags, 4094c1bb86cdSEric Blake Error **errp) 4095c1bb86cdSEric Blake { 4096c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4097c1bb86cdSEric Blake int ret; 4098c1bb86cdSEric Blake 4099c1bb86cdSEric Blake #if defined(__APPLE__) && defined(__MACH__) 4100129c7d1cSMarkus Armbruster /* 4101129c7d1cSMarkus Armbruster * Caution: while qdict_get_str() is fine, getting non-string types 4102129c7d1cSMarkus Armbruster * would require more care. When @options come from -blockdev or 4103129c7d1cSMarkus Armbruster * blockdev_add, its members are typed according to the QAPI 4104129c7d1cSMarkus Armbruster * schema, but when they come from -drive, they're all QString. 
4105129c7d1cSMarkus Armbruster */ 4106c1bb86cdSEric Blake const char *filename = qdict_get_str(options, "filename"); 4107c1bb86cdSEric Blake char bsd_path[MAXPATHLEN] = ""; 4108c1bb86cdSEric Blake bool error_occurred = false; 4109c1bb86cdSEric Blake 4110c1bb86cdSEric Blake /* If using a real cdrom */ 4111c1bb86cdSEric Blake if (strcmp(filename, "/dev/cdrom") == 0) { 4112c1bb86cdSEric Blake char *mediaType = NULL; 4113c1bb86cdSEric Blake kern_return_t ret_val; 4114c1bb86cdSEric Blake io_iterator_t mediaIterator = 0; 4115c1bb86cdSEric Blake 4116c1bb86cdSEric Blake mediaType = FindEjectableOpticalMedia(&mediaIterator); 4117c1bb86cdSEric Blake if (mediaType == NULL) { 4118c1bb86cdSEric Blake error_setg(errp, "Please make sure your CD/DVD is in the optical" 4119c1bb86cdSEric Blake " drive"); 4120c1bb86cdSEric Blake error_occurred = true; 4121c1bb86cdSEric Blake goto hdev_open_Mac_error; 4122c1bb86cdSEric Blake } 4123c1bb86cdSEric Blake 4124c1bb86cdSEric Blake ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags); 4125c1bb86cdSEric Blake if (ret_val != KERN_SUCCESS) { 4126c1bb86cdSEric Blake error_setg(errp, "Could not get BSD path for optical drive"); 4127c1bb86cdSEric Blake error_occurred = true; 4128c1bb86cdSEric Blake goto hdev_open_Mac_error; 4129c1bb86cdSEric Blake } 4130c1bb86cdSEric Blake 4131c1bb86cdSEric Blake /* If a real optical drive was not found */ 4132c1bb86cdSEric Blake if (bsd_path[0] == '\0') { 4133c1bb86cdSEric Blake error_setg(errp, "Failed to obtain bsd path for optical drive"); 4134c1bb86cdSEric Blake error_occurred = true; 4135c1bb86cdSEric Blake goto hdev_open_Mac_error; 4136c1bb86cdSEric Blake } 4137c1bb86cdSEric Blake 4138c1bb86cdSEric Blake /* If using a cdrom disc and finding a partition on the disc failed */ 4139c1bb86cdSEric Blake if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 && 4140c1bb86cdSEric Blake setup_cdrom(bsd_path, errp) == false) { 4141c1bb86cdSEric Blake print_unmounting_directions(bsd_path); 4142c1bb86cdSEric 
Blake error_occurred = true; 4143c1bb86cdSEric Blake goto hdev_open_Mac_error; 4144c1bb86cdSEric Blake } 4145c1bb86cdSEric Blake 414646f5ac20SEric Blake qdict_put_str(options, "filename", bsd_path); 4147c1bb86cdSEric Blake 4148c1bb86cdSEric Blake hdev_open_Mac_error: 4149c1bb86cdSEric Blake g_free(mediaType); 4150c1bb86cdSEric Blake if (mediaIterator) { 4151c1bb86cdSEric Blake IOObjectRelease(mediaIterator); 4152c1bb86cdSEric Blake } 4153c1bb86cdSEric Blake if (error_occurred) { 4154c1bb86cdSEric Blake return -ENOENT; 4155c1bb86cdSEric Blake } 4156c1bb86cdSEric Blake } 4157c1bb86cdSEric Blake #endif /* defined(__APPLE__) && defined(__MACH__) */ 4158c1bb86cdSEric Blake 4159c1bb86cdSEric Blake s->type = FTYPE_FILE; 4160c1bb86cdSEric Blake 4161668f62ecSMarkus Armbruster ret = raw_open_common(bs, options, flags, 0, true, errp); 4162c1bb86cdSEric Blake if (ret < 0) { 4163c1bb86cdSEric Blake #if defined(__APPLE__) && defined(__MACH__) 4164c1bb86cdSEric Blake if (*bsd_path) { 4165c1bb86cdSEric Blake filename = bsd_path; 4166c1bb86cdSEric Blake } 4167c1bb86cdSEric Blake /* if a physical device experienced an error while being opened */ 4168c1bb86cdSEric Blake if (strncmp(filename, "/dev/", 5) == 0) { 4169c1bb86cdSEric Blake print_unmounting_directions(filename); 4170c1bb86cdSEric Blake } 4171c1bb86cdSEric Blake #endif /* defined(__APPLE__) && defined(__MACH__) */ 4172c1bb86cdSEric Blake return ret; 4173c1bb86cdSEric Blake } 4174c1bb86cdSEric Blake 4175c1bb86cdSEric Blake /* Since this does ioctl the device must be already opened */ 4176c1bb86cdSEric Blake bs->sg = hdev_is_sg(bs); 4177c1bb86cdSEric Blake 4178c1bb86cdSEric Blake return ret; 4179c1bb86cdSEric Blake } 4180c1bb86cdSEric Blake 4181c1bb86cdSEric Blake #if defined(__linux__) 41822f3a7ab3SKevin Wolf static int coroutine_fn 41832f3a7ab3SKevin Wolf hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) 4184c1bb86cdSEric Blake { 4185c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 418603425671SKevin 
Wolf RawPosixAIOData acb; 41872f3a7ab3SKevin Wolf int ret; 4188c1bb86cdSEric Blake 41892f3a7ab3SKevin Wolf ret = fd_open(bs); 41902f3a7ab3SKevin Wolf if (ret < 0) { 41912f3a7ab3SKevin Wolf return ret; 41922f3a7ab3SKevin Wolf } 4193c1bb86cdSEric Blake 41947c9e5276SPaolo Bonzini if (req == SG_IO && s->pr_mgr) { 41957c9e5276SPaolo Bonzini struct sg_io_hdr *io_hdr = buf; 41967c9e5276SPaolo Bonzini if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT || 41977c9e5276SPaolo Bonzini io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) { 41980fdb7311SEmanuele Giuseppe Esposito return pr_manager_execute(s->pr_mgr, qemu_get_current_aio_context(), 41992f3a7ab3SKevin Wolf s->fd, io_hdr); 42007c9e5276SPaolo Bonzini } 42017c9e5276SPaolo Bonzini } 42027c9e5276SPaolo Bonzini 420303425671SKevin Wolf acb = (RawPosixAIOData) { 420403425671SKevin Wolf .bs = bs, 420503425671SKevin Wolf .aio_type = QEMU_AIO_IOCTL, 420603425671SKevin Wolf .aio_fildes = s->fd, 420703425671SKevin Wolf .aio_offset = 0, 420803425671SKevin Wolf .ioctl = { 420903425671SKevin Wolf .buf = buf, 421003425671SKevin Wolf .cmd = req, 421103425671SKevin Wolf }, 421203425671SKevin Wolf }; 421303425671SKevin Wolf 42140fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_ioctl, &acb); 4215c1bb86cdSEric Blake } 4216c1bb86cdSEric Blake #endif /* linux */ 4217c1bb86cdSEric Blake 421833d70fb6SKevin Wolf static coroutine_fn int 42190c802287SVladimir Sementsov-Ogievskiy hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) 4220c1bb86cdSEric Blake { 42211c450366SAnton Nefedov BDRVRawState *s = bs->opaque; 422233d70fb6SKevin Wolf int ret; 4223c1bb86cdSEric Blake 422433d70fb6SKevin Wolf ret = fd_open(bs); 422533d70fb6SKevin Wolf if (ret < 0) { 42261c450366SAnton Nefedov raw_account_discard(s, bytes, ret); 422733d70fb6SKevin Wolf return ret; 4228c1bb86cdSEric Blake } 422946ee0f46SKevin Wolf return raw_do_pdiscard(bs, offset, bytes, true); 4230c1bb86cdSEric Blake } 4231c1bb86cdSEric Blake 4232c1bb86cdSEric 
Blake static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs, 4233f34b2bcfSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, BdrvRequestFlags flags) 4234c1bb86cdSEric Blake { 4235c1bb86cdSEric Blake int rc; 4236c1bb86cdSEric Blake 4237c1bb86cdSEric Blake rc = fd_open(bs); 4238c1bb86cdSEric Blake if (rc < 0) { 4239c1bb86cdSEric Blake return rc; 4240c1bb86cdSEric Blake } 424134fa110eSKevin Wolf 42427154d8aeSKevin Wolf return raw_do_pwrite_zeroes(bs, offset, bytes, flags, true); 4243c1bb86cdSEric Blake } 4244c1bb86cdSEric Blake 4245c1bb86cdSEric Blake static BlockDriver bdrv_host_device = { 4246c1bb86cdSEric Blake .format_name = "host_device", 4247c1bb86cdSEric Blake .protocol_name = "host_device", 4248c1bb86cdSEric Blake .instance_size = sizeof(BDRVRawState), 4249c1bb86cdSEric Blake .bdrv_needs_filename = true, 4250c1bb86cdSEric Blake .bdrv_probe_device = hdev_probe_device, 4251c1bb86cdSEric Blake .bdrv_parse_filename = hdev_parse_filename, 4252c1bb86cdSEric Blake .bdrv_file_open = hdev_open, 4253c1bb86cdSEric Blake .bdrv_close = raw_close, 4254c1bb86cdSEric Blake .bdrv_reopen_prepare = raw_reopen_prepare, 4255c1bb86cdSEric Blake .bdrv_reopen_commit = raw_reopen_commit, 4256c1bb86cdSEric Blake .bdrv_reopen_abort = raw_reopen_abort, 42575a5e7f8cSMaxim Levitsky .bdrv_co_create_opts = bdrv_co_create_opts_simple, 42585a5e7f8cSMaxim Levitsky .create_opts = &bdrv_create_opts_simple, 42598a2ce0bcSAlberto Garcia .mutable_opts = mutable_opts, 4260dd577a26SStefan Hajnoczi .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 4261c1bb86cdSEric Blake .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes, 4262c1bb86cdSEric Blake 4263c1bb86cdSEric Blake .bdrv_co_preadv = raw_co_preadv, 4264c1bb86cdSEric Blake .bdrv_co_pwritev = raw_co_pwritev, 426533d70fb6SKevin Wolf .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 426633d70fb6SKevin Wolf .bdrv_co_pdiscard = hdev_co_pdiscard, 42671efad060SFam Zheng .bdrv_co_copy_range_from = raw_co_copy_range_from, 42681efad060SFam 
Zheng .bdrv_co_copy_range_to = raw_co_copy_range_to, 4269c1bb86cdSEric Blake .bdrv_refresh_limits = raw_refresh_limits, 4270042b757cSNishanth Aravamudan .bdrv_attach_aio_context = raw_aio_attach_aio_context, 4271c1bb86cdSEric Blake 4272061ca8a3SKevin Wolf .bdrv_co_truncate = raw_co_truncate, 4273c86422c5SEmanuele Giuseppe Esposito .bdrv_co_getlength = raw_co_getlength, 42743d47eb0aSEmanuele Giuseppe Esposito .bdrv_co_get_info = raw_co_get_info, 42757f36a50aSHanna Reitz .bdrv_get_specific_info = raw_get_specific_info, 427682618d7bSEmanuele Giuseppe Esposito .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size, 4277d9245599SAnton Nefedov .bdrv_get_specific_stats = hdev_get_specific_stats, 4278244a5668SFam Zheng .bdrv_check_perm = raw_check_perm, 4279244a5668SFam Zheng .bdrv_set_perm = raw_set_perm, 4280244a5668SFam Zheng .bdrv_abort_perm_update = raw_abort_perm_update, 4281c1bb86cdSEric Blake .bdrv_probe_blocksizes = hdev_probe_blocksizes, 4282c1bb86cdSEric Blake .bdrv_probe_geometry = hdev_probe_geometry, 4283c1bb86cdSEric Blake 4284c1bb86cdSEric Blake /* generic scsi device */ 4285c1bb86cdSEric Blake #ifdef __linux__ 42862f3a7ab3SKevin Wolf .bdrv_co_ioctl = hdev_co_ioctl, 4287c1bb86cdSEric Blake #endif 42886d43eaa3SSam Li 42896d43eaa3SSam Li /* zoned device */ 42906d43eaa3SSam Li #if defined(CONFIG_BLKZONED) 42916d43eaa3SSam Li /* zone management operations */ 42926d43eaa3SSam Li .bdrv_co_zone_report = raw_co_zone_report, 42936d43eaa3SSam Li .bdrv_co_zone_mgmt = raw_co_zone_mgmt, 42944751d09aSSam Li .bdrv_co_zone_append = raw_co_zone_append, 42956d43eaa3SSam Li #endif 4296c1bb86cdSEric Blake }; 4297c1bb86cdSEric Blake 4298c1bb86cdSEric Blake #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 4299c1bb86cdSEric Blake static void cdrom_parse_filename(const char *filename, QDict *options, 4300c1bb86cdSEric Blake Error **errp) 4301c1bb86cdSEric Blake { 430203c320d8SMax Reitz bdrv_parse_filename_strip_prefix(filename, 
"host_cdrom:", options); 4303c1bb86cdSEric Blake } 43048c6f27e7SPaolo Bonzini 43058c6f27e7SPaolo Bonzini static void cdrom_refresh_limits(BlockDriverState *bs, Error **errp) 43068c6f27e7SPaolo Bonzini { 43078c6f27e7SPaolo Bonzini bs->bl.has_variable_length = true; 43088c6f27e7SPaolo Bonzini raw_refresh_limits(bs, errp); 43098c6f27e7SPaolo Bonzini } 4310c1bb86cdSEric Blake #endif 4311c1bb86cdSEric Blake 4312c1bb86cdSEric Blake #ifdef __linux__ 4313c1bb86cdSEric Blake static int cdrom_open(BlockDriverState *bs, QDict *options, int flags, 4314c1bb86cdSEric Blake Error **errp) 4315c1bb86cdSEric Blake { 4316c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4317c1bb86cdSEric Blake 4318c1bb86cdSEric Blake s->type = FTYPE_CD; 4319c1bb86cdSEric Blake 4320c1bb86cdSEric Blake /* open will not fail even if no CD is inserted, so add O_NONBLOCK */ 4321230ff739SJohn Snow return raw_open_common(bs, options, flags, O_NONBLOCK, true, errp); 4322c1bb86cdSEric Blake } 4323c1bb86cdSEric Blake 4324c1bb86cdSEric Blake static int cdrom_probe_device(const char *filename) 4325c1bb86cdSEric Blake { 4326c1bb86cdSEric Blake int fd, ret; 4327c1bb86cdSEric Blake int prio = 0; 4328c1bb86cdSEric Blake struct stat st; 4329c1bb86cdSEric Blake 4330b18a24a9SDaniel P. 
Berrangé fd = qemu_open(filename, O_RDONLY | O_NONBLOCK, NULL); 4331c1bb86cdSEric Blake if (fd < 0) { 4332c1bb86cdSEric Blake goto out; 4333c1bb86cdSEric Blake } 4334c1bb86cdSEric Blake ret = fstat(fd, &st); 4335c1bb86cdSEric Blake if (ret == -1 || !S_ISBLK(st.st_mode)) { 4336c1bb86cdSEric Blake goto outc; 4337c1bb86cdSEric Blake } 4338c1bb86cdSEric Blake 4339c1bb86cdSEric Blake /* Attempt to detect via a CDROM specific ioctl */ 4340c1bb86cdSEric Blake ret = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT); 4341c1bb86cdSEric Blake if (ret >= 0) 4342c1bb86cdSEric Blake prio = 100; 4343c1bb86cdSEric Blake 4344c1bb86cdSEric Blake outc: 4345c1bb86cdSEric Blake qemu_close(fd); 4346c1bb86cdSEric Blake out: 4347c1bb86cdSEric Blake return prio; 4348c1bb86cdSEric Blake } 4349c1bb86cdSEric Blake 43501e97be91SEmanuele Giuseppe Esposito static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs) 4351c1bb86cdSEric Blake { 4352c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4353c1bb86cdSEric Blake int ret; 4354c1bb86cdSEric Blake 4355c1bb86cdSEric Blake ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT); 4356c1bb86cdSEric Blake return ret == CDS_DISC_OK; 4357c1bb86cdSEric Blake } 4358c1bb86cdSEric Blake 43592531b390SEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag) 4360c1bb86cdSEric Blake { 4361c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4362c1bb86cdSEric Blake 4363c1bb86cdSEric Blake if (eject_flag) { 4364c1bb86cdSEric Blake if (ioctl(s->fd, CDROMEJECT, NULL) < 0) 4365c1bb86cdSEric Blake perror("CDROMEJECT"); 4366c1bb86cdSEric Blake } else { 4367c1bb86cdSEric Blake if (ioctl(s->fd, CDROMCLOSETRAY, NULL) < 0) 4368c1bb86cdSEric Blake perror("CDROMEJECT"); 4369c1bb86cdSEric Blake } 4370c1bb86cdSEric Blake } 4371c1bb86cdSEric Blake 43722c75261cSEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked) 4373c1bb86cdSEric Blake { 4374c1bb86cdSEric Blake BDRVRawState 
*s = bs->opaque; 4375c1bb86cdSEric Blake 4376c1bb86cdSEric Blake if (ioctl(s->fd, CDROM_LOCKDOOR, locked) < 0) { 4377c1bb86cdSEric Blake /* 4378c1bb86cdSEric Blake * Note: an error can happen if the distribution automatically 4379c1bb86cdSEric Blake * mounts the CD-ROM 4380c1bb86cdSEric Blake */ 4381c1bb86cdSEric Blake /* perror("CDROM_LOCKDOOR"); */ 4382c1bb86cdSEric Blake } 4383c1bb86cdSEric Blake } 4384c1bb86cdSEric Blake 4385c1bb86cdSEric Blake static BlockDriver bdrv_host_cdrom = { 4386c1bb86cdSEric Blake .format_name = "host_cdrom", 4387c1bb86cdSEric Blake .protocol_name = "host_cdrom", 4388c1bb86cdSEric Blake .instance_size = sizeof(BDRVRawState), 4389c1bb86cdSEric Blake .bdrv_needs_filename = true, 4390c1bb86cdSEric Blake .bdrv_probe_device = cdrom_probe_device, 4391c1bb86cdSEric Blake .bdrv_parse_filename = cdrom_parse_filename, 4392c1bb86cdSEric Blake .bdrv_file_open = cdrom_open, 4393c1bb86cdSEric Blake .bdrv_close = raw_close, 4394c1bb86cdSEric Blake .bdrv_reopen_prepare = raw_reopen_prepare, 4395c1bb86cdSEric Blake .bdrv_reopen_commit = raw_reopen_commit, 4396c1bb86cdSEric Blake .bdrv_reopen_abort = raw_reopen_abort, 43975a5e7f8cSMaxim Levitsky .bdrv_co_create_opts = bdrv_co_create_opts_simple, 43985a5e7f8cSMaxim Levitsky .create_opts = &bdrv_create_opts_simple, 43998a2ce0bcSAlberto Garcia .mutable_opts = mutable_opts, 4400dd577a26SStefan Hajnoczi .bdrv_co_invalidate_cache = raw_co_invalidate_cache, 4401c1bb86cdSEric Blake 4402c1bb86cdSEric Blake .bdrv_co_preadv = raw_co_preadv, 4403c1bb86cdSEric Blake .bdrv_co_pwritev = raw_co_pwritev, 440433d70fb6SKevin Wolf .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 44058c6f27e7SPaolo Bonzini .bdrv_refresh_limits = cdrom_refresh_limits, 4406042b757cSNishanth Aravamudan .bdrv_attach_aio_context = raw_aio_attach_aio_context, 4407c1bb86cdSEric Blake 4408061ca8a3SKevin Wolf .bdrv_co_truncate = raw_co_truncate, 4409c86422c5SEmanuele Giuseppe Esposito .bdrv_co_getlength = raw_co_getlength, 441082618d7bSEmanuele 
Giuseppe Esposito .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size, 4411c1bb86cdSEric Blake 4412c1bb86cdSEric Blake /* removable device support */ 44131e97be91SEmanuele Giuseppe Esposito .bdrv_co_is_inserted = cdrom_co_is_inserted, 44142531b390SEmanuele Giuseppe Esposito .bdrv_co_eject = cdrom_co_eject, 44152c75261cSEmanuele Giuseppe Esposito .bdrv_co_lock_medium = cdrom_co_lock_medium, 4416c1bb86cdSEric Blake 4417c1bb86cdSEric Blake /* generic scsi device */ 44182f3a7ab3SKevin Wolf .bdrv_co_ioctl = hdev_co_ioctl, 4419c1bb86cdSEric Blake }; 4420c1bb86cdSEric Blake #endif /* __linux__ */ 4421c1bb86cdSEric Blake 4422c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__) 4423c1bb86cdSEric Blake static int cdrom_open(BlockDriverState *bs, QDict *options, int flags, 4424c1bb86cdSEric Blake Error **errp) 4425c1bb86cdSEric Blake { 4426c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4427c1bb86cdSEric Blake int ret; 4428c1bb86cdSEric Blake 4429c1bb86cdSEric Blake s->type = FTYPE_CD; 4430c1bb86cdSEric Blake 4431668f62ecSMarkus Armbruster ret = raw_open_common(bs, options, flags, 0, true, errp); 4432c1bb86cdSEric Blake if (ret) { 4433c1bb86cdSEric Blake return ret; 4434c1bb86cdSEric Blake } 4435c1bb86cdSEric Blake 4436c1bb86cdSEric Blake /* make sure the door isn't locked at this time */ 4437c1bb86cdSEric Blake ioctl(s->fd, CDIOCALLOW); 4438c1bb86cdSEric Blake return 0; 4439c1bb86cdSEric Blake } 4440c1bb86cdSEric Blake 4441c1bb86cdSEric Blake static int cdrom_probe_device(const char *filename) 4442c1bb86cdSEric Blake { 4443c1bb86cdSEric Blake if (strstart(filename, "/dev/cd", NULL) || 4444c1bb86cdSEric Blake strstart(filename, "/dev/acd", NULL)) 4445c1bb86cdSEric Blake return 100; 4446c1bb86cdSEric Blake return 0; 4447c1bb86cdSEric Blake } 4448c1bb86cdSEric Blake 4449c1bb86cdSEric Blake static int cdrom_reopen(BlockDriverState *bs) 4450c1bb86cdSEric Blake { 4451c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4452c1bb86cdSEric Blake 
int fd; 4453c1bb86cdSEric Blake 4454c1bb86cdSEric Blake /* 4455c1bb86cdSEric Blake * Force reread of possibly changed/newly loaded disc, 4456c1bb86cdSEric Blake * FreeBSD seems to not notice sometimes... 4457c1bb86cdSEric Blake */ 4458c1bb86cdSEric Blake if (s->fd >= 0) 4459c1bb86cdSEric Blake qemu_close(s->fd); 4460b18a24a9SDaniel P. Berrangé fd = qemu_open(bs->filename, s->open_flags, NULL); 4461c1bb86cdSEric Blake if (fd < 0) { 4462c1bb86cdSEric Blake s->fd = -1; 4463c1bb86cdSEric Blake return -EIO; 4464c1bb86cdSEric Blake } 4465c1bb86cdSEric Blake s->fd = fd; 4466c1bb86cdSEric Blake 4467c1bb86cdSEric Blake /* make sure the door isn't locked at this time */ 4468c1bb86cdSEric Blake ioctl(s->fd, CDIOCALLOW); 4469c1bb86cdSEric Blake return 0; 4470c1bb86cdSEric Blake } 4471c1bb86cdSEric Blake 44721e97be91SEmanuele Giuseppe Esposito static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs) 4473c1bb86cdSEric Blake { 447436c6c877SPaolo Bonzini return raw_getlength(bs) > 0; 4475c1bb86cdSEric Blake } 4476c1bb86cdSEric Blake 44772531b390SEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag) 4478c1bb86cdSEric Blake { 4479c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4480c1bb86cdSEric Blake 4481c1bb86cdSEric Blake if (s->fd < 0) 4482c1bb86cdSEric Blake return; 4483c1bb86cdSEric Blake 4484c1bb86cdSEric Blake (void) ioctl(s->fd, CDIOCALLOW); 4485c1bb86cdSEric Blake 4486c1bb86cdSEric Blake if (eject_flag) { 4487c1bb86cdSEric Blake if (ioctl(s->fd, CDIOCEJECT) < 0) 4488c1bb86cdSEric Blake perror("CDIOCEJECT"); 4489c1bb86cdSEric Blake } else { 4490c1bb86cdSEric Blake if (ioctl(s->fd, CDIOCCLOSE) < 0) 4491c1bb86cdSEric Blake perror("CDIOCCLOSE"); 4492c1bb86cdSEric Blake } 4493c1bb86cdSEric Blake 4494c1bb86cdSEric Blake cdrom_reopen(bs); 4495c1bb86cdSEric Blake } 4496c1bb86cdSEric Blake 44972c75261cSEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked) 
4498c1bb86cdSEric Blake { 4499c1bb86cdSEric Blake BDRVRawState *s = bs->opaque; 4500c1bb86cdSEric Blake 4501c1bb86cdSEric Blake if (s->fd < 0) 4502c1bb86cdSEric Blake return; 4503c1bb86cdSEric Blake if (ioctl(s->fd, (locked ? CDIOCPREVENT : CDIOCALLOW)) < 0) { 4504c1bb86cdSEric Blake /* 4505c1bb86cdSEric Blake * Note: an error can happen if the distribution automatically 4506c1bb86cdSEric Blake * mounts the CD-ROM 4507c1bb86cdSEric Blake */ 4508c1bb86cdSEric Blake /* perror("CDROM_LOCKDOOR"); */ 4509c1bb86cdSEric Blake } 4510c1bb86cdSEric Blake } 4511c1bb86cdSEric Blake 4512c1bb86cdSEric Blake static BlockDriver bdrv_host_cdrom = { 4513c1bb86cdSEric Blake .format_name = "host_cdrom", 4514c1bb86cdSEric Blake .protocol_name = "host_cdrom", 4515c1bb86cdSEric Blake .instance_size = sizeof(BDRVRawState), 4516c1bb86cdSEric Blake .bdrv_needs_filename = true, 4517c1bb86cdSEric Blake .bdrv_probe_device = cdrom_probe_device, 4518c1bb86cdSEric Blake .bdrv_parse_filename = cdrom_parse_filename, 4519c1bb86cdSEric Blake .bdrv_file_open = cdrom_open, 4520c1bb86cdSEric Blake .bdrv_close = raw_close, 4521c1bb86cdSEric Blake .bdrv_reopen_prepare = raw_reopen_prepare, 4522c1bb86cdSEric Blake .bdrv_reopen_commit = raw_reopen_commit, 4523c1bb86cdSEric Blake .bdrv_reopen_abort = raw_reopen_abort, 45245a5e7f8cSMaxim Levitsky .bdrv_co_create_opts = bdrv_co_create_opts_simple, 45255a5e7f8cSMaxim Levitsky .create_opts = &bdrv_create_opts_simple, 45268a2ce0bcSAlberto Garcia .mutable_opts = mutable_opts, 4527c1bb86cdSEric Blake 4528c1bb86cdSEric Blake .bdrv_co_preadv = raw_co_preadv, 4529c1bb86cdSEric Blake .bdrv_co_pwritev = raw_co_pwritev, 453033d70fb6SKevin Wolf .bdrv_co_flush_to_disk = raw_co_flush_to_disk, 45318c6f27e7SPaolo Bonzini .bdrv_refresh_limits = cdrom_refresh_limits, 4532042b757cSNishanth Aravamudan .bdrv_attach_aio_context = raw_aio_attach_aio_context, 4533c1bb86cdSEric Blake 4534061ca8a3SKevin Wolf .bdrv_co_truncate = raw_co_truncate, 4535c86422c5SEmanuele Giuseppe Esposito 
.bdrv_co_getlength = raw_co_getlength, 453682618d7bSEmanuele Giuseppe Esposito .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size, 4537c1bb86cdSEric Blake 4538c1bb86cdSEric Blake /* removable device support */ 45391e97be91SEmanuele Giuseppe Esposito .bdrv_co_is_inserted = cdrom_co_is_inserted, 45402531b390SEmanuele Giuseppe Esposito .bdrv_co_eject = cdrom_co_eject, 45412c75261cSEmanuele Giuseppe Esposito .bdrv_co_lock_medium = cdrom_co_lock_medium, 4542c1bb86cdSEric Blake }; 4543c1bb86cdSEric Blake #endif /* __FreeBSD__ */ 4544c1bb86cdSEric Blake 454514176c8dSJoelle van Dyne #endif /* HAVE_HOST_BLOCK_DEVICE */ 454614176c8dSJoelle van Dyne 4547c1bb86cdSEric Blake static void bdrv_file_init(void) 4548c1bb86cdSEric Blake { 4549c1bb86cdSEric Blake /* 4550c1bb86cdSEric Blake * Register all the drivers. Note that order is important, the driver 4551c1bb86cdSEric Blake * registered last will get probed first. 4552c1bb86cdSEric Blake */ 4553c1bb86cdSEric Blake bdrv_register(&bdrv_file); 455414176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE) 4555c1bb86cdSEric Blake bdrv_register(&bdrv_host_device); 4556c1bb86cdSEric Blake #ifdef __linux__ 4557c1bb86cdSEric Blake bdrv_register(&bdrv_host_cdrom); 4558c1bb86cdSEric Blake #endif 4559c1bb86cdSEric Blake #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 4560c1bb86cdSEric Blake bdrv_register(&bdrv_host_cdrom); 4561c1bb86cdSEric Blake #endif 456214176c8dSJoelle van Dyne #endif /* HAVE_HOST_BLOCK_DEVICE */ 4563c1bb86cdSEric Blake } 4564c1bb86cdSEric Blake 4565c1bb86cdSEric Blake block_init(bdrv_file_init); 4566