1c1bb86cdSEric Blake /*
2c1bb86cdSEric Blake * Block driver for RAW files (posix)
3c1bb86cdSEric Blake *
4c1bb86cdSEric Blake * Copyright (c) 2006 Fabrice Bellard
5c1bb86cdSEric Blake *
6c1bb86cdSEric Blake * Permission is hereby granted, free of charge, to any person obtaining a copy
7c1bb86cdSEric Blake * of this software and associated documentation files (the "Software"), to deal
8c1bb86cdSEric Blake * in the Software without restriction, including without limitation the rights
9c1bb86cdSEric Blake * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10c1bb86cdSEric Blake * copies of the Software, and to permit persons to whom the Software is
11c1bb86cdSEric Blake * furnished to do so, subject to the following conditions:
12c1bb86cdSEric Blake *
13c1bb86cdSEric Blake * The above copyright notice and this permission notice shall be included in
14c1bb86cdSEric Blake * all copies or substantial portions of the Software.
15c1bb86cdSEric Blake *
16c1bb86cdSEric Blake * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17c1bb86cdSEric Blake * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18c1bb86cdSEric Blake * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19c1bb86cdSEric Blake * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20c1bb86cdSEric Blake * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21c1bb86cdSEric Blake * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22c1bb86cdSEric Blake * THE SOFTWARE.
23c1bb86cdSEric Blake */
24922a01a0SMarkus Armbruster
25c1bb86cdSEric Blake #include "qemu/osdep.h"
26c1bb86cdSEric Blake #include "qapi/error.h"
27c1bb86cdSEric Blake #include "qemu/cutils.h"
28c1bb86cdSEric Blake #include "qemu/error-report.h"
29e2c1c34fSMarkus Armbruster #include "block/block-io.h"
30c1bb86cdSEric Blake #include "block/block_int.h"
31c1bb86cdSEric Blake #include "qemu/module.h"
32922a01a0SMarkus Armbruster #include "qemu/option.h"
33ffa244c8SKevin Wolf #include "qemu/units.h"
345df022cfSPeter Maydell #include "qemu/memalign.h"
35c1bb86cdSEric Blake #include "trace.h"
36c1bb86cdSEric Blake #include "block/thread-pool.h"
37c1bb86cdSEric Blake #include "qemu/iov.h"
38c1bb86cdSEric Blake #include "block/raw-aio.h"
39452fcdbcSMarkus Armbruster #include "qapi/qmp/qdict.h"
40c1bb86cdSEric Blake #include "qapi/qmp/qstring.h"
41c1bb86cdSEric Blake
427c9e5276SPaolo Bonzini #include "scsi/pr-manager.h"
437c9e5276SPaolo Bonzini #include "scsi/constants.h"
447c9e5276SPaolo Bonzini
45c1bb86cdSEric Blake #if defined(__APPLE__) && (__MACH__)
4614176c8dSJoelle van Dyne #include <sys/ioctl.h>
4714176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE)
48c1bb86cdSEric Blake #include <paths.h>
49c1bb86cdSEric Blake #include <sys/param.h>
500dfc7af2SAkihiko Odaki #include <sys/mount.h>
51c1bb86cdSEric Blake #include <IOKit/IOKitLib.h>
52c1bb86cdSEric Blake #include <IOKit/IOBSD.h>
53c1bb86cdSEric Blake #include <IOKit/storage/IOMediaBSDClient.h>
54c1bb86cdSEric Blake #include <IOKit/storage/IOMedia.h>
55c1bb86cdSEric Blake #include <IOKit/storage/IOCDMedia.h>
56c1bb86cdSEric Blake //#include <IOKit/storage/IOCDTypes.h>
57c1bb86cdSEric Blake #include <IOKit/storage/IODVDMedia.h>
58c1bb86cdSEric Blake #include <CoreFoundation/CoreFoundation.h>
5914176c8dSJoelle van Dyne #endif /* defined(HAVE_HOST_BLOCK_DEVICE) */
60c1bb86cdSEric Blake #endif
61c1bb86cdSEric Blake
62c1bb86cdSEric Blake #ifdef __sun__
63c1bb86cdSEric Blake #define _POSIX_PTHREAD_SEMANTICS 1
64c1bb86cdSEric Blake #include <sys/dkio.h>
65c1bb86cdSEric Blake #endif
66c1bb86cdSEric Blake #ifdef __linux__
67c1bb86cdSEric Blake #include <sys/ioctl.h>
68c1bb86cdSEric Blake #include <sys/param.h>
691efad060SFam Zheng #include <sys/syscall.h>
705edc8557SKevin Wolf #include <sys/vfs.h>
716d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
726d43eaa3SSam Li #include <linux/blkzoned.h>
736d43eaa3SSam Li #endif
74c1bb86cdSEric Blake #include <linux/cdrom.h>
75c1bb86cdSEric Blake #include <linux/fd.h>
76c1bb86cdSEric Blake #include <linux/fs.h>
77c1bb86cdSEric Blake #include <linux/hdreg.h>
785edc8557SKevin Wolf #include <linux/magic.h>
79c1bb86cdSEric Blake #include <scsi/sg.h>
80c1bb86cdSEric Blake #ifdef __s390__
81c1bb86cdSEric Blake #include <asm/dasd.h>
82c1bb86cdSEric Blake #endif
83c1bb86cdSEric Blake #ifndef FS_NOCOW_FL
84c1bb86cdSEric Blake #define FS_NOCOW_FL 0x00800000 /* Do not cow file */
85c1bb86cdSEric Blake #endif
86c1bb86cdSEric Blake #endif
87c1bb86cdSEric Blake #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE)
88c1bb86cdSEric Blake #include <linux/falloc.h>
89c1bb86cdSEric Blake #endif
90c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
91c1bb86cdSEric Blake #include <sys/disk.h>
92c1bb86cdSEric Blake #include <sys/cdio.h>
93c1bb86cdSEric Blake #endif
94c1bb86cdSEric Blake
95c1bb86cdSEric Blake #ifdef __OpenBSD__
96c1bb86cdSEric Blake #include <sys/ioctl.h>
97c1bb86cdSEric Blake #include <sys/disklabel.h>
98c1bb86cdSEric Blake #include <sys/dkio.h>
99c1bb86cdSEric Blake #endif
100c1bb86cdSEric Blake
101c1bb86cdSEric Blake #ifdef __NetBSD__
102c1bb86cdSEric Blake #include <sys/ioctl.h>
103c1bb86cdSEric Blake #include <sys/disklabel.h>
104c1bb86cdSEric Blake #include <sys/dkio.h>
105c1bb86cdSEric Blake #include <sys/disk.h>
106c1bb86cdSEric Blake #endif
107c1bb86cdSEric Blake
108c1bb86cdSEric Blake #ifdef __DragonFly__
109c1bb86cdSEric Blake #include <sys/ioctl.h>
110c1bb86cdSEric Blake #include <sys/diskslice.h>
111c1bb86cdSEric Blake #endif
112c1bb86cdSEric Blake
113c1bb86cdSEric Blake /* OS X does not have O_DSYNC */
114c1bb86cdSEric Blake #ifndef O_DSYNC
115c1bb86cdSEric Blake #ifdef O_SYNC
116c1bb86cdSEric Blake #define O_DSYNC O_SYNC
117c1bb86cdSEric Blake #elif defined(O_FSYNC)
118c1bb86cdSEric Blake #define O_DSYNC O_FSYNC
119c1bb86cdSEric Blake #endif
120c1bb86cdSEric Blake #endif
121c1bb86cdSEric Blake
122c1bb86cdSEric Blake /* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */
123c1bb86cdSEric Blake #ifndef O_DIRECT
124c1bb86cdSEric Blake #define O_DIRECT O_DSYNC
125c1bb86cdSEric Blake #endif
126c1bb86cdSEric Blake
127c1bb86cdSEric Blake #define FTYPE_FILE 0
128c1bb86cdSEric Blake #define FTYPE_CD 1
129c1bb86cdSEric Blake
130c1bb86cdSEric Blake #define MAX_BLOCKSIZE 4096
131c1bb86cdSEric Blake
132244a5668SFam Zheng /* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes,
133244a5668SFam Zheng * leaving a few more bytes for its future use. */
134244a5668SFam Zheng #define RAW_LOCK_PERM_BASE 100
135244a5668SFam Zheng #define RAW_LOCK_SHARED_BASE 200
136244a5668SFam Zheng
/* Per-BlockDriverState driver state for the POSIX raw file/device driver. */
typedef struct BDRVRawState {
    int fd;             /* open file descriptor, or -1 when not open */
    bool use_lock;      /* apply byte-range permission locks on fd */
    int type;           /* FTYPE_FILE or FTYPE_CD */
    int open_flags;     /* open(2) flags the descriptor was opened with */
    size_t buf_align;   /* required memory buffer alignment for O_DIRECT I/O */

    /* The current permissions. */
    uint64_t perm;
    uint64_t shared_perm;

    /* The perms bits whose corresponding bytes are already locked in
     * s->fd. */
    uint64_t locked_perm;
    uint64_t locked_shared_perm;

    /* value of the "aio-max-batch" option (0 = let the AIO backend decide) */
    uint64_t aio_max_batch;

    /* Descriptor and flags staged while a permission change is in flight. */
    int perm_change_fd;
    int perm_change_flags;
    /* in-flight reopen transaction, if any */
    BDRVReopenState *reopen_state;

    bool has_discard:1;        /* discard believed usable; presumably cleared on failure — confirm in I/O code */
    bool has_write_zeroes:1;   /* write-zeroes believed usable — see I/O code */
    bool use_linux_aio:1;      /* aio=native was selected */
    bool use_linux_io_uring:1; /* aio=io_uring was selected */
    int page_cache_inconsistent; /* errno from fdatasync failure */
    bool has_fallocate;
    bool needs_alignment;      /* I/O must honour O_DIRECT-style alignment */
    bool force_alignment;      /* force needs_alignment even without O_DIRECT */
    bool drop_cache;           /* value of the "drop-cache" option */
    bool check_cache_dropped;  /* value of the "x-check-cache-dropped" option */
    /* discard operation counters */
    struct {
        uint64_t discard_nb_ok;
        uint64_t discard_nb_failed;
        uint64_t discard_bytes_ok;
    } stats;

    /* persistent reservation manager from the "pr-manager" option, or NULL */
    PRManager *pr_mgr;
} BDRVRawState;
177c1bb86cdSEric Blake
/*
 * State staged during a bdrv_reopen() transaction; mirrors the
 * corresponding BDRVRawState fields and is applied on commit.
 */
typedef struct BDRVRawReopenState {
    int open_flags;            /* proposed open(2) flags */
    bool drop_cache;           /* proposed "drop-cache" value */
    bool check_cache_dropped;  /* proposed "x-check-cache-dropped" value */
} BDRVRawReopenState;
183c1bb86cdSEric Blake
/*
 * Sanity-check that the descriptor backing @bs is open.
 *
 * Called from the I/O paths; returns 0 when s->fd looks valid and
 * -EIO when no usable descriptor is available.
 */
static int fd_open(BlockDriverState *bs)
{
    BDRVRawState *state = bs->opaque;

    return state->fd < 0 ? -EIO : 0;
}
19414176c8dSJoelle van Dyne
19536c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs);
196c1bb86cdSEric Blake
/*
 * Parameter bundle for one AIO request handed to the worker functions.
 * Which union member is meaningful depends on @aio_type.
 */
typedef struct RawPosixAIOData {
    BlockDriverState *bs;
    int aio_type;        /* request type selector; presumably the QEMU_AIO_* values from block/raw-aio.h — confirm */
    int aio_fildes;      /* file descriptor the request operates on */

    off_t aio_offset;    /* byte offset of the request */
    uint64_t aio_nbytes; /* request length in bytes */

    union {
        struct {
            struct iovec *iov;  /* scatter/gather list */
            int niov;           /* number of iovec entries */
        } io;
        struct {
            uint64_t cmd;       /* ioctl request code */
            void *buf;          /* ioctl argument buffer */
        } ioctl;
        struct {
            int aio_fd2;        /* second descriptor for copy_range */
            off_t aio_offset2;  /* offset within aio_fd2 */
        } copy_range;
        struct {
            PreallocMode prealloc;  /* preallocation mode for truncate */
            Error **errp;           /* destination for error reporting */
        } truncate;
        struct {
            unsigned int *nr_zones;     /* zone count; NOTE(review): likely in/out — confirm against caller */
            BlockZoneDescriptor *zones; /* buffer receiving zone descriptors */
        } zone_report;
        struct {
            unsigned long op;   /* zone management operation code */
        } zone_mgmt;
    };
} RawPosixAIOData;
231c1bb86cdSEric Blake
232c1bb86cdSEric Blake #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
233c1bb86cdSEric Blake static int cdrom_reopen(BlockDriverState *bs);
234c1bb86cdSEric Blake #endif
235c1bb86cdSEric Blake
/*
 * Elide EAGAIN and EACCES details when failing to lock, as this
 * indicates that the specified file region is already locked by
 * another process, which is considered a common scenario.
 * All other errnos are unexpected, so keep their strerror() text.
 */
#define raw_lock_error_setg_errno(errp, err, fmt, ...)                  \
    do {                                                                \
        if ((err) == EAGAIN || (err) == EACCES) {                       \
            error_setg((errp), (fmt), ## __VA_ARGS__);                  \
        } else {                                                        \
            error_setg_errno((errp), (err), (fmt), ## __VA_ARGS__);     \
        }                                                               \
    } while (0)
249797e3e38SDavid Edmondson
250c1bb86cdSEric Blake #if defined(__NetBSD__)
raw_normalize_devicepath(const char ** filename,Error ** errp)251db0754dfSFam Zheng static int raw_normalize_devicepath(const char **filename, Error **errp)
252c1bb86cdSEric Blake {
253c1bb86cdSEric Blake static char namebuf[PATH_MAX];
254c1bb86cdSEric Blake const char *dp, *fname;
255c1bb86cdSEric Blake struct stat sb;
256c1bb86cdSEric Blake
257c1bb86cdSEric Blake fname = *filename;
258c1bb86cdSEric Blake dp = strrchr(fname, '/');
259c1bb86cdSEric Blake if (lstat(fname, &sb) < 0) {
260f6fc1e30SPaolo Bonzini error_setg_file_open(errp, errno, fname);
261c1bb86cdSEric Blake return -errno;
262c1bb86cdSEric Blake }
263c1bb86cdSEric Blake
264c1bb86cdSEric Blake if (!S_ISBLK(sb.st_mode)) {
265c1bb86cdSEric Blake return 0;
266c1bb86cdSEric Blake }
267c1bb86cdSEric Blake
268c1bb86cdSEric Blake if (dp == NULL) {
269c1bb86cdSEric Blake snprintf(namebuf, PATH_MAX, "r%s", fname);
270c1bb86cdSEric Blake } else {
271c1bb86cdSEric Blake snprintf(namebuf, PATH_MAX, "%.*s/r%s",
272c1bb86cdSEric Blake (int)(dp - fname), fname, dp + 1);
273c1bb86cdSEric Blake }
274c1bb86cdSEric Blake *filename = namebuf;
275db0754dfSFam Zheng warn_report("%s is a block device, using %s", fname, *filename);
276c1bb86cdSEric Blake
277c1bb86cdSEric Blake return 0;
278c1bb86cdSEric Blake }
279c1bb86cdSEric Blake #else
raw_normalize_devicepath(const char ** filename,Error ** errp)280db0754dfSFam Zheng static int raw_normalize_devicepath(const char **filename, Error **errp)
281c1bb86cdSEric Blake {
282c1bb86cdSEric Blake return 0;
283c1bb86cdSEric Blake }
284c1bb86cdSEric Blake #endif
285c1bb86cdSEric Blake
286c1bb86cdSEric Blake /*
287c1bb86cdSEric Blake * Get logical block size via ioctl. On success store it in @sector_size_p.
288c1bb86cdSEric Blake */
/*
 * Query the logical block (sector) size of @fd via host ioctls.
 *
 * On success store the size in @sector_size_p and return 0.  On failure
 * return -errno from the last attempted ioctl, or -ENOTSUP when this
 * host provides no suitable ioctl at all.
 */
static int probe_logical_blocksize(int fd, unsigned int *sector_size_p)
{
    static const unsigned long ioctl_list[] = {
#ifdef BLKSSZGET
        BLKSSZGET,
#endif
#ifdef DKIOCGETBLOCKSIZE
        DKIOCGETBLOCKSIZE,
#endif
#ifdef DIOCGSECTORSIZE
        DIOCGSECTORSIZE,
#endif
    };
    unsigned int sector_size;
    bool success = false;
    size_t i;

    /* Default error if the list above turned out empty. */
    errno = ENOTSUP;

    /* Try each known ioctl; the last one that succeeds wins. */
    for (i = 0; i < sizeof(ioctl_list) / sizeof(ioctl_list[0]); i++) {
        if (ioctl(fd, ioctl_list[i], &sector_size) >= 0) {
            *sector_size_p = sector_size;
            success = true;
        }
    }

    return success ? 0 : -errno;
}
318c1bb86cdSEric Blake
319c1bb86cdSEric Blake /**
320c1bb86cdSEric Blake * Get physical block size of @fd.
321c1bb86cdSEric Blake * On success, store it in @blk_size and return 0.
322c1bb86cdSEric Blake * On failure, return -errno.
323c1bb86cdSEric Blake */
/**
 * Get physical block size of @fd.
 * On success, store it in @blk_size and return 0.
 * On failure, return -errno; -ENOTSUP when the host lacks BLKPBSZGET.
 */
static int probe_physical_blocksize(int fd, unsigned int *blk_size)
{
#ifndef BLKPBSZGET
    return -ENOTSUP;
#else
    return ioctl(fd, BLKPBSZGET, blk_size) < 0 ? -errno : 0;
#endif
}
335c1bb86cdSEric Blake
/*
 * Returns true if no alignment restrictions are necessary even for files
 * opened with O_DIRECT.
 *
 * raw_probe_alignment() treats a probed alignment of 1 as a failed probe
 * and falls back to a conservative 4k default; that fallback can be
 * skipped for filesystems known to accept byte-aligned O_DIRECT
 * requests.  Currently that is only NFS on Linux.
 */
static bool dio_byte_aligned(int fd)
{
#ifdef __linux__
    struct statfs stfs;

    if (fstatfs(fd, &stfs) == 0 && stfs.f_type == NFS_SUPER_MAGIC) {
        return true;
    }
#endif
    return false;
}
3575edc8557SKevin Wolf
/* Whether requests on @bs must honour O_DIRECT-style alignment. */
static bool raw_needs_alignment(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    bool direct_io = (bs->open_flags & BDRV_O_NOCACHE) != 0;

    /* O_DIRECT requires alignment unless the filesystem is byte-aligned. */
    if (direct_io && !dio_byte_aligned(s->fd)) {
        return true;
    }

    return s->force_alignment;
}
3685dbd0ce1SKevin Wolf
/*
 * Check if a read is allowed with the given memory buffer and length.
 *
 * Used to probe O_DIRECT memory-buffer and request alignment: a short
 * test read at offset 0 either succeeds, or (on Linux) fails with
 * EINVAL when buffer/length do not satisfy the O_DIRECT constraints.
 */
static bool raw_is_io_aligned(int fd, void *buf, size_t len)
{
    if (pread(fd, buf, len, 0) >= 0) {
        return true;
    }

#ifdef __linux__
    /*
     * Linux reports misaligned O_DIRECT reads as EINVAL.  Any other
     * error (e.g. a genuine I/O error on a failing drive) says nothing
     * about alignment, so treat it as aligned.
     */
    return errno != EINVAL;
#else
    return false;
#endif
}
393c1bb86cdSEric Blake
/*
 * Probe the request alignment (bs->bl.request_alignment) and memory
 * buffer alignment (s->buf_align) required for I/O on @fd.
 *
 * Order of attempts: logical block size ioctl, then (Linux) the XFS
 * DIOINFO ioctl, then empirical probing with test reads.  If either
 * value still cannot be determined, an error is set on @errp and the
 * caller is expected to fail the open unless O_DIRECT is disabled.
 */
static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    char *buf;
    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
    size_t alignments[] = {1, 512, 1024, 2048, 4096};

    /* For SCSI generic devices the alignment is not really used.
       With buffered I/O, we don't have any restrictions. */
    if (bdrv_is_sg(bs) || !s->needs_alignment) {
        bs->bl.request_alignment = 1;
        s->buf_align = 1;
        return;
    }

    /* 0 means "unknown" until one of the probes below succeeds. */
    bs->bl.request_alignment = 0;
    s->buf_align = 0;
    /* Let's try to use the logical blocksize for the alignment. */
    if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) {
        bs->bl.request_alignment = 0;
    }

#ifdef __linux__
    /*
     * The XFS ioctl definitions are shipped in extra packages that might
     * not always be available. Since we just need the XFS_IOC_DIOINFO ioctl
     * here, we simply use our own definition instead:
     */
    struct xfs_dioattr {
        uint32_t d_mem;
        uint32_t d_miniosz;
        uint32_t d_maxiosz;
    } da;
    if (ioctl(fd, _IOR('X', 30, struct xfs_dioattr), &da) >= 0) {
        bs->bl.request_alignment = da.d_miniosz;
        /* The kernel returns wrong information for d_mem */
        /* s->buf_align = da.d_mem; */
    }
#endif

    /*
     * If we could not get the sizes so far, we can only guess them. First try
     * to detect request alignment, since it is more likely to succeed. Then
     * try to detect buf_align, which cannot be detected in some cases (e.g.
     * Gluster). If buf_align cannot be detected, we fallback to the value of
     * request_alignment.
     */

    if (!bs->bl.request_alignment) {
        int i;
        size_t align;
        buf = qemu_memalign(max_align, max_align);
        /* Vary the request length with a max-aligned buffer. */
        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
            align = alignments[i];
            if (raw_is_io_aligned(fd, buf, align)) {
                /* Fallback to safe value. */
                bs->bl.request_alignment = (align != 1) ? align : max_align;
                break;
            }
        }
        qemu_vfree(buf);
    }

    if (!s->buf_align) {
        int i;
        size_t align;
        buf = qemu_memalign(max_align, 2 * max_align);
        /* Vary the buffer offset with a max-aligned request length. */
        for (i = 0; i < ARRAY_SIZE(alignments); i++) {
            align = alignments[i];
            if (raw_is_io_aligned(fd, buf + align, max_align)) {
                /* Fallback to request_alignment. */
                s->buf_align = (align != 1) ? align : bs->bl.request_alignment;
                break;
            }
        }
        qemu_vfree(buf);
    }

    if (!s->buf_align || !bs->bl.request_alignment) {
        error_setg(errp, "Could not find working O_DIRECT alignment");
        error_append_hint(errp, "Try cache.direct=off\n");
    }
}
477c1bb86cdSEric Blake
/*
 * Report whether the host device behind @fd accepts writes.
 *
 * Linux block devices can be configured "read-only" using blockdev(8),
 * independently of device node permissions, so open(2) with O_RDWR
 * succeeds while actual writes fail with EPERM.  Query BLKROGET so
 * bdrv_open() can fail up front as expected.
 *
 * Returns 0 when writable (or when @fd is not a block device),
 * -EACCES for a read-only block device, and -errno on probe failure.
 */
static int check_hdev_writable(int fd)
{
#if defined(BLKROGET)
    struct stat st;
    int ro = 0;

    if (fstat(fd, &st)) {
        return -errno;
    }

    /* Only block devices carry the blockdev read-only flag. */
    if (!S_ISBLK(st.st_mode)) {
        return 0;
    }

    if (ioctl(fd, BLKROGET, &ro) < 0) {
        return -errno;
    }

    return ro ? -EACCES : 0;
#else
    return 0;
#endif /* defined(BLKROGET) */
}
51020eaf1bfSKevin Wolf
raw_parse_flags(int bdrv_flags,int * open_flags,bool has_writers)51123dece19SKevin Wolf static void raw_parse_flags(int bdrv_flags, int *open_flags, bool has_writers)
512c1bb86cdSEric Blake {
51323dece19SKevin Wolf bool read_write = false;
514c1bb86cdSEric Blake assert(open_flags != NULL);
515c1bb86cdSEric Blake
516c1bb86cdSEric Blake *open_flags |= O_BINARY;
517c1bb86cdSEric Blake *open_flags &= ~O_ACCMODE;
51823dece19SKevin Wolf
51923dece19SKevin Wolf if (bdrv_flags & BDRV_O_AUTO_RDONLY) {
52023dece19SKevin Wolf read_write = has_writers;
52123dece19SKevin Wolf } else if (bdrv_flags & BDRV_O_RDWR) {
52223dece19SKevin Wolf read_write = true;
52323dece19SKevin Wolf }
52423dece19SKevin Wolf
52523dece19SKevin Wolf if (read_write) {
526c1bb86cdSEric Blake *open_flags |= O_RDWR;
527c1bb86cdSEric Blake } else {
528c1bb86cdSEric Blake *open_flags |= O_RDONLY;
529c1bb86cdSEric Blake }
530c1bb86cdSEric Blake
531c1bb86cdSEric Blake /* Use O_DSYNC for write-through caching, no flags for write-back caching,
532c1bb86cdSEric Blake * and O_DIRECT for no caching. */
533c1bb86cdSEric Blake if ((bdrv_flags & BDRV_O_NOCACHE)) {
534c1bb86cdSEric Blake *open_flags |= O_DIRECT;
535c1bb86cdSEric Blake }
536c1bb86cdSEric Blake }
537c1bb86cdSEric Blake
raw_parse_filename(const char * filename,QDict * options,Error ** errp)538c1bb86cdSEric Blake static void raw_parse_filename(const char *filename, QDict *options,
539c1bb86cdSEric Blake Error **errp)
540c1bb86cdSEric Blake {
54103c320d8SMax Reitz bdrv_parse_filename_strip_prefix(filename, "file:", options);
542c1bb86cdSEric Blake }
543c1bb86cdSEric Blake
/* Runtime options accepted by this driver (consumed in raw_open_common()). */
static QemuOptsList raw_runtime_opts = {
    .name = "raw",
    .head = QTAILQ_HEAD_INITIALIZER(raw_runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "File name of the image",
        },
        {
            .name = "aio",
            .type = QEMU_OPT_STRING,
            .help = "host AIO implementation (threads, native, io_uring)",
        },
        {
            .name = "aio-max-batch",
            .type = QEMU_OPT_NUMBER,
            .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
        },
        {
            .name = "locking",
            .type = QEMU_OPT_STRING,
            .help = "file locking mode (on/off/auto, default: auto)",
        },
        {
            .name = "pr-manager",
            .type = QEMU_OPT_STRING,
            .help = "id of persistent reservation manager object (default: none)",
        },
#if defined(__linux__)
        {
            .name = "drop-cache",
            .type = QEMU_OPT_BOOL,
            .help = "invalidate page cache during live migration (default: on)",
        },
#endif
        {
            .name = "x-check-cache-dropped",
            .type = QEMU_OPT_BOOL,
            .help = "check that page cache was dropped on live migration (default: off)"
        },
        { /* end of list */ }
    },
};

/* Options that may be changed without reopening the image. */
static const char *const mutable_opts[] = { "x-check-cache-dropped", NULL };
5908a2ce0bcSAlberto Garcia
raw_open_common(BlockDriverState * bs,QDict * options,int bdrv_flags,int open_flags,bool device,Error ** errp)591c1bb86cdSEric Blake static int raw_open_common(BlockDriverState *bs, QDict *options,
592230ff739SJohn Snow int bdrv_flags, int open_flags,
593230ff739SJohn Snow bool device, Error **errp)
594c1bb86cdSEric Blake {
595c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
596c1bb86cdSEric Blake QemuOpts *opts;
597c1bb86cdSEric Blake Error *local_err = NULL;
598c1bb86cdSEric Blake const char *filename = NULL;
5997c9e5276SPaolo Bonzini const char *str;
600c1bb86cdSEric Blake BlockdevAioOptions aio, aio_default;
601c1bb86cdSEric Blake int fd, ret;
602c1bb86cdSEric Blake struct stat st;
603244a5668SFam Zheng OnOffAuto locking;
604c1bb86cdSEric Blake
605c1bb86cdSEric Blake opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
606af175e85SMarkus Armbruster if (!qemu_opts_absorb_qdict(opts, options, errp)) {
607c1bb86cdSEric Blake ret = -EINVAL;
608c1bb86cdSEric Blake goto fail;
609c1bb86cdSEric Blake }
610c1bb86cdSEric Blake
611c1bb86cdSEric Blake filename = qemu_opt_get(opts, "filename");
612c1bb86cdSEric Blake
613db0754dfSFam Zheng ret = raw_normalize_devicepath(&filename, errp);
614c1bb86cdSEric Blake if (ret != 0) {
615c1bb86cdSEric Blake goto fail;
616c1bb86cdSEric Blake }
617c1bb86cdSEric Blake
618c6447510SAarushi Mehta if (bdrv_flags & BDRV_O_NATIVE_AIO) {
619c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_NATIVE;
620c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING
621c6447510SAarushi Mehta } else if (bdrv_flags & BDRV_O_IO_URING) {
622c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_IO_URING;
623c6447510SAarushi Mehta #endif
624c6447510SAarushi Mehta } else {
625c6447510SAarushi Mehta aio_default = BLOCKDEV_AIO_OPTIONS_THREADS;
626c6447510SAarushi Mehta }
627c6447510SAarushi Mehta
628f7abe0ecSMarc-André Lureau aio = qapi_enum_parse(&BlockdevAioOptions_lookup,
629f7abe0ecSMarc-André Lureau qemu_opt_get(opts, "aio"),
63006c60b6cSMarkus Armbruster aio_default, &local_err);
631c1bb86cdSEric Blake if (local_err) {
632c1bb86cdSEric Blake error_propagate(errp, local_err);
633c1bb86cdSEric Blake ret = -EINVAL;
634c1bb86cdSEric Blake goto fail;
635c1bb86cdSEric Blake }
636c6447510SAarushi Mehta
637c1bb86cdSEric Blake s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE);
638c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING
639c6447510SAarushi Mehta s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING);
640c6447510SAarushi Mehta #endif
641c1bb86cdSEric Blake
642684960d4SStefano Garzarella s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0);
643684960d4SStefano Garzarella
644f7abe0ecSMarc-André Lureau locking = qapi_enum_parse(&OnOffAuto_lookup,
645f7abe0ecSMarc-André Lureau qemu_opt_get(opts, "locking"),
64606c60b6cSMarkus Armbruster ON_OFF_AUTO_AUTO, &local_err);
647244a5668SFam Zheng if (local_err) {
648244a5668SFam Zheng error_propagate(errp, local_err);
649244a5668SFam Zheng ret = -EINVAL;
650244a5668SFam Zheng goto fail;
651244a5668SFam Zheng }
652244a5668SFam Zheng switch (locking) {
653244a5668SFam Zheng case ON_OFF_AUTO_ON:
654244a5668SFam Zheng s->use_lock = true;
6552b218f5dSFam Zheng if (!qemu_has_ofd_lock()) {
656db0754dfSFam Zheng warn_report("File lock requested but OFD locking syscall is "
657db0754dfSFam Zheng "unavailable, falling back to POSIX file locks");
658db0754dfSFam Zheng error_printf("Due to the implementation, locks can be lost "
6592b218f5dSFam Zheng "unexpectedly.\n");
6602b218f5dSFam Zheng }
661244a5668SFam Zheng break;
662244a5668SFam Zheng case ON_OFF_AUTO_OFF:
663244a5668SFam Zheng s->use_lock = false;
664244a5668SFam Zheng break;
665244a5668SFam Zheng case ON_OFF_AUTO_AUTO:
6662b218f5dSFam Zheng s->use_lock = qemu_has_ofd_lock();
667244a5668SFam Zheng break;
668244a5668SFam Zheng default:
669244a5668SFam Zheng abort();
670244a5668SFam Zheng }
671244a5668SFam Zheng
6727c9e5276SPaolo Bonzini str = qemu_opt_get(opts, "pr-manager");
6737c9e5276SPaolo Bonzini if (str) {
6747c9e5276SPaolo Bonzini s->pr_mgr = pr_manager_lookup(str, &local_err);
6757c9e5276SPaolo Bonzini if (local_err) {
6767c9e5276SPaolo Bonzini error_propagate(errp, local_err);
6777c9e5276SPaolo Bonzini ret = -EINVAL;
6787c9e5276SPaolo Bonzini goto fail;
6797c9e5276SPaolo Bonzini }
6807c9e5276SPaolo Bonzini }
6817c9e5276SPaolo Bonzini
682f357fcd8SStefan Hajnoczi s->drop_cache = qemu_opt_get_bool(opts, "drop-cache", true);
68331be8a2aSStefan Hajnoczi s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped",
68431be8a2aSStefan Hajnoczi false);
68531be8a2aSStefan Hajnoczi
686c1bb86cdSEric Blake s->open_flags = open_flags;
68723dece19SKevin Wolf raw_parse_flags(bdrv_flags, &s->open_flags, false);
688c1bb86cdSEric Blake
689c1bb86cdSEric Blake s->fd = -1;
690b18a24a9SDaniel P. Berrangé fd = qemu_open(filename, s->open_flags, errp);
69164107dc0SKevin Wolf ret = fd < 0 ? -errno : 0;
69264107dc0SKevin Wolf
69364107dc0SKevin Wolf if (ret < 0) {
694c1bb86cdSEric Blake if (ret == -EROFS) {
695c1bb86cdSEric Blake ret = -EACCES;
696c1bb86cdSEric Blake }
697c1bb86cdSEric Blake goto fail;
698c1bb86cdSEric Blake }
699c1bb86cdSEric Blake s->fd = fd;
700c1bb86cdSEric Blake
701bca5283bSKevin Wolf /* Check s->open_flags rather than bdrv_flags due to auto-read-only */
702bca5283bSKevin Wolf if (s->open_flags & O_RDWR) {
703bca5283bSKevin Wolf ret = check_hdev_writable(s->fd);
704bca5283bSKevin Wolf if (ret < 0) {
705bca5283bSKevin Wolf error_setg_errno(errp, -ret, "The device is not writable");
706bca5283bSKevin Wolf goto fail;
707bca5283bSKevin Wolf }
708bca5283bSKevin Wolf }
709bca5283bSKevin Wolf
710244a5668SFam Zheng s->perm = 0;
711244a5668SFam Zheng s->shared_perm = BLK_PERM_ALL;
712244a5668SFam Zheng
713c1bb86cdSEric Blake #ifdef CONFIG_LINUX_AIO
714c1bb86cdSEric Blake /* Currently Linux does AIO only for files opened with O_DIRECT */
715*cd0c0db0SStefan Hajnoczi if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) {
716c1bb86cdSEric Blake error_setg(errp, "aio=native was specified, but it requires "
717c1bb86cdSEric Blake "cache.direct=on, which was not specified.");
718c1bb86cdSEric Blake ret = -EINVAL;
719c1bb86cdSEric Blake goto fail;
720c1bb86cdSEric Blake }
721c1bb86cdSEric Blake #else
722c1bb86cdSEric Blake if (s->use_linux_aio) {
723c1bb86cdSEric Blake error_setg(errp, "aio=native was specified, but is not supported "
724c1bb86cdSEric Blake "in this build.");
725c1bb86cdSEric Blake ret = -EINVAL;
726c1bb86cdSEric Blake goto fail;
727c1bb86cdSEric Blake }
728c1bb86cdSEric Blake #endif /* !defined(CONFIG_LINUX_AIO) */
729c1bb86cdSEric Blake
730*cd0c0db0SStefan Hajnoczi #ifndef CONFIG_LINUX_IO_URING
731c6447510SAarushi Mehta if (s->use_linux_io_uring) {
732c6447510SAarushi Mehta error_setg(errp, "aio=io_uring was specified, but is not supported "
733c6447510SAarushi Mehta "in this build.");
734c6447510SAarushi Mehta ret = -EINVAL;
735c6447510SAarushi Mehta goto fail;
736c6447510SAarushi Mehta }
737c6447510SAarushi Mehta #endif /* !defined(CONFIG_LINUX_IO_URING) */
738c6447510SAarushi Mehta
739c1bb86cdSEric Blake s->has_discard = true;
740c1bb86cdSEric Blake s->has_write_zeroes = true;
741c1bb86cdSEric Blake
742c1bb86cdSEric Blake if (fstat(s->fd, &st) < 0) {
743c1bb86cdSEric Blake ret = -errno;
744c1bb86cdSEric Blake error_setg_errno(errp, errno, "Could not stat file");
745c1bb86cdSEric Blake goto fail;
746c1bb86cdSEric Blake }
747230ff739SJohn Snow
748230ff739SJohn Snow if (!device) {
7498d17adf3SDaniel P. Berrangé if (!S_ISREG(st.st_mode)) {
7508d17adf3SDaniel P. Berrangé error_setg(errp, "'%s' driver requires '%s' to be a regular file",
7518d17adf3SDaniel P. Berrangé bs->drv->format_name, bs->filename);
752230ff739SJohn Snow ret = -EINVAL;
753230ff739SJohn Snow goto fail;
754230ff739SJohn Snow } else {
755c1bb86cdSEric Blake s->has_fallocate = true;
756c1bb86cdSEric Blake }
757230ff739SJohn Snow } else {
758230ff739SJohn Snow if (!(S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
7598d17adf3SDaniel P. Berrangé error_setg(errp, "'%s' driver requires '%s' to be either "
7608d17adf3SDaniel P. Berrangé "a character or block device",
7618d17adf3SDaniel P. Berrangé bs->drv->format_name, bs->filename);
762230ff739SJohn Snow ret = -EINVAL;
763230ff739SJohn Snow goto fail;
764230ff739SJohn Snow }
765230ff739SJohn Snow }
766774c726cSSam Li #ifdef CONFIG_BLKZONED
767774c726cSSam Li /*
768774c726cSSam Li * The kernel page cache does not reliably work for writes to SWR zones
769774c726cSSam Li * of zoned block device because it can not guarantee the order of writes.
770774c726cSSam Li */
771774c726cSSam Li if ((bs->bl.zoned != BLK_Z_NONE) &&
772774c726cSSam Li (!(s->open_flags & O_DIRECT))) {
773774c726cSSam Li error_setg(errp, "The driver supports zoned devices, and it requires "
774774c726cSSam Li "cache.direct=on, which was not specified.");
775774c726cSSam Li return -EINVAL; /* No host kernel page cache */
776774c726cSSam Li }
777774c726cSSam Li #endif
778230ff739SJohn Snow
779c1bb86cdSEric Blake if (S_ISBLK(st.st_mode)) {
780c1bb86cdSEric Blake #ifdef __linux__
781c1bb86cdSEric Blake /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do
782c1bb86cdSEric Blake * not rely on the contents of discarded blocks unless using O_DIRECT.
783c1bb86cdSEric Blake * Same for BLKZEROOUT.
784c1bb86cdSEric Blake */
785c1bb86cdSEric Blake if (!(bs->open_flags & BDRV_O_NOCACHE)) {
786c1bb86cdSEric Blake s->has_write_zeroes = false;
787c1bb86cdSEric Blake }
788c1bb86cdSEric Blake #endif
789c1bb86cdSEric Blake }
790c1bb86cdSEric Blake #ifdef __FreeBSD__
791c1bb86cdSEric Blake if (S_ISCHR(st.st_mode)) {
792c1bb86cdSEric Blake /*
793c1bb86cdSEric Blake * The file is a char device (disk), which on FreeBSD isn't behind
794c1bb86cdSEric Blake * a pager, so force all requests to be aligned. This is needed
795c1bb86cdSEric Blake * so QEMU makes sure all IO operations on the device are aligned
796c1bb86cdSEric Blake * to sector size, or else FreeBSD will reject them with EINVAL.
797c1bb86cdSEric Blake */
7985dbd0ce1SKevin Wolf s->force_alignment = true;
799c1bb86cdSEric Blake }
800c1bb86cdSEric Blake #endif
8015dbd0ce1SKevin Wolf s->needs_alignment = raw_needs_alignment(bs);
802c1bb86cdSEric Blake
803738301e1SKevin Wolf bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
8042f0c6e7aSKevin Wolf if (S_ISREG(st.st_mode)) {
8052f0c6e7aSKevin Wolf /* When extending regular files, we get zeros from the OS */
8062f0c6e7aSKevin Wolf bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
8072f0c6e7aSKevin Wolf }
808c1bb86cdSEric Blake ret = 0;
809c1bb86cdSEric Blake fail:
810a8c5cf27SKevin Wolf if (ret < 0 && s->fd != -1) {
811a8c5cf27SKevin Wolf qemu_close(s->fd);
812a8c5cf27SKevin Wolf }
813c1bb86cdSEric Blake if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) {
814c1bb86cdSEric Blake unlink(filename);
815c1bb86cdSEric Blake }
816c1bb86cdSEric Blake qemu_opts_del(opts);
817c1bb86cdSEric Blake return ret;
818c1bb86cdSEric Blake }
819c1bb86cdSEric Blake
raw_open(BlockDriverState * bs,QDict * options,int flags,Error ** errp)820c1bb86cdSEric Blake static int raw_open(BlockDriverState *bs, QDict *options, int flags,
821c1bb86cdSEric Blake Error **errp)
822c1bb86cdSEric Blake {
823c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
824c1bb86cdSEric Blake
825c1bb86cdSEric Blake s->type = FTYPE_FILE;
826230ff739SJohn Snow return raw_open_common(bs, options, flags, 0, false, errp);
827c1bb86cdSEric Blake }
828c1bb86cdSEric Blake
/*
 * Phases of the byte-range lock update performed by raw_handle_perm_lock():
 * PREPARE acquires the newly required lock bytes (and validates them),
 * COMMIT releases bytes that are no longer needed, and ABORT rolls back to
 * the previously held set.
 */
typedef enum {
    RAW_PL_PREPARE,
    RAW_PL_COMMIT,
    RAW_PL_ABORT,
} RawPermLockOp;

/* Iterate @i over one bit position for every permission bit in BLK_PERM_ALL */
#define PERM_FOREACH(i) \
    for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++)
837244a5668SFam Zheng
838244a5668SFam Zheng /* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the
839244a5668SFam Zheng * file; if @unlock == true, also unlock the unneeded bytes.
840244a5668SFam Zheng * @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
841244a5668SFam Zheng */
static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
                                uint64_t perm_lock_bits,
                                uint64_t shared_perm_lock_bits,
                                bool unlock, Error **errp)
{
    int ret;
    int i;
    uint64_t locked_perm, locked_shared_perm;

    /*
     * @s may be NULL when the caller operates on a raw fd without an open
     * BDRVRawState (no cached lock bookkeeping is available then).
     */
    if (s) {
        locked_perm = s->locked_perm;
        locked_shared_perm = s->locked_shared_perm;
    } else {
        /*
         * We don't have the previous bits, just lock/unlock for each of the
         * requested bits.
         */
        if (unlock) {
            locked_perm = BLK_PERM_ALL;
            locked_shared_perm = BLK_PERM_ALL;
        } else {
            locked_perm = 0;
            locked_shared_perm = 0;
        }
    }

    /*
     * First pass: the "permission" byte range.  Take locks we newly need,
     * and (only if @unlock) drop locks we hold but no longer need.  On any
     * failure we return immediately; bits already processed stay recorded
     * in s->locked_perm so a later ABORT can roll back correctly.
     */
    PERM_FOREACH(i) {
        int off = RAW_LOCK_PERM_BASE + i;
        uint64_t bit = (1ULL << i);
        if ((perm_lock_bits & bit) && !(locked_perm & bit)) {
            ret = qemu_lock_fd(fd, off, 1, false);
            if (ret) {
                raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
                                          off);
                return ret;
            } else if (s) {
                /* Record success so partial progress can be undone later */
                s->locked_perm |= bit;
            }
        } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) {
            ret = qemu_unlock_fd(fd, off, 1);
            if (ret) {
                error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
                return ret;
            } else if (s) {
                s->locked_perm &= ~bit;
            }
        }
    }
    /*
     * Second pass: the "shared permission" byte range, mirroring the logic
     * above with s->locked_shared_perm as the cached state.
     */
    PERM_FOREACH(i) {
        int off = RAW_LOCK_SHARED_BASE + i;
        uint64_t bit = (1ULL << i);
        if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) {
            ret = qemu_lock_fd(fd, off, 1, false);
            if (ret) {
                raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
                                          off);
                return ret;
            } else if (s) {
                s->locked_shared_perm |= bit;
            }
        } else if (unlock && (locked_shared_perm & bit) &&
                   !(shared_perm_lock_bits & bit)) {
            ret = qemu_unlock_fd(fd, off, 1);
            if (ret) {
                error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
                return ret;
            } else if (s) {
                s->locked_shared_perm &= ~bit;
            }
        }
    }
    return 0;
}
915244a5668SFam Zheng
916244a5668SFam Zheng /* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. */
raw_check_lock_bytes(int fd,uint64_t perm,uint64_t shared_perm,Error ** errp)917d0a96155SMax Reitz static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
918244a5668SFam Zheng Error **errp)
919244a5668SFam Zheng {
920244a5668SFam Zheng int ret;
921244a5668SFam Zheng int i;
922244a5668SFam Zheng
923244a5668SFam Zheng PERM_FOREACH(i) {
924244a5668SFam Zheng int off = RAW_LOCK_SHARED_BASE + i;
925244a5668SFam Zheng uint64_t p = 1ULL << i;
926244a5668SFam Zheng if (perm & p) {
927d0a96155SMax Reitz ret = qemu_lock_fd_test(fd, off, 1, true);
928244a5668SFam Zheng if (ret) {
929244a5668SFam Zheng char *perm_name = bdrv_perm_names(p);
930797e3e38SDavid Edmondson
931797e3e38SDavid Edmondson raw_lock_error_setg_errno(errp, -ret,
932244a5668SFam Zheng "Failed to get \"%s\" lock",
933244a5668SFam Zheng perm_name);
934244a5668SFam Zheng g_free(perm_name);
935244a5668SFam Zheng return ret;
936244a5668SFam Zheng }
937244a5668SFam Zheng }
938244a5668SFam Zheng }
939244a5668SFam Zheng PERM_FOREACH(i) {
940244a5668SFam Zheng int off = RAW_LOCK_PERM_BASE + i;
941244a5668SFam Zheng uint64_t p = 1ULL << i;
942244a5668SFam Zheng if (!(shared_perm & p)) {
943d0a96155SMax Reitz ret = qemu_lock_fd_test(fd, off, 1, true);
944244a5668SFam Zheng if (ret) {
945244a5668SFam Zheng char *perm_name = bdrv_perm_names(p);
946797e3e38SDavid Edmondson
947797e3e38SDavid Edmondson raw_lock_error_setg_errno(errp, -ret,
948244a5668SFam Zheng "Failed to get shared \"%s\" lock",
949244a5668SFam Zheng perm_name);
950244a5668SFam Zheng g_free(perm_name);
951244a5668SFam Zheng return ret;
952244a5668SFam Zheng }
953244a5668SFam Zheng }
954244a5668SFam Zheng }
955244a5668SFam Zheng return 0;
956244a5668SFam Zheng }
957244a5668SFam Zheng
/*
 * Apply one phase of a permission-lock transaction on bs->opaque->fd.
 *
 * In RAW_PL_PREPARE, lock the union of old and new permission bytes and
 * verify no other process holds conflicting locks; on failure, fall through
 * to the ABORT path to roll back.  RAW_PL_COMMIT / RAW_PL_ABORT shrink the
 * held locks down to the new / old set respectively; unlock errors there are
 * only warned about, since unlocking should not fail.
 *
 * Returns 0 on success (or when locking is disabled/inactive), negative
 * errno on PREPARE failure.
 */
static int raw_handle_perm_lock(BlockDriverState *bs,
                                RawPermLockOp op,
                                uint64_t new_perm, uint64_t new_shared,
                                Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret = 0;
    Error *local_err = NULL;

    /* Locking disabled by the "locking" option (or unsupported) */
    if (!s->use_lock) {
        return 0;
    }

    /* Inactive images (e.g. migration target) don't hold locks */
    if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) {
        return 0;
    }

    switch (op) {
    case RAW_PL_PREPARE:
        if ((s->perm | new_perm) == s->perm &&
            (s->shared_perm & new_shared) == s->shared_perm)
        {
            /*
             * We are going to unlock bytes, it should not fail. If it fail due
             * to some fs-dependent permission-unrelated reasons (which occurs
             * sometimes on NFS and leads to abort in bdrv_replace_child) we
             * can't prevent such errors by any check here. And we ignore them
             * anyway in ABORT and COMMIT.
             */
            return 0;
        }
        /* Take the union of old and new lock bytes, without unlocking */
        ret = raw_apply_lock_bytes(s, s->fd, s->perm | new_perm,
                                   ~s->shared_perm | ~new_shared,
                                   false, errp);
        if (!ret) {
            ret = raw_check_lock_bytes(s->fd, new_perm, new_shared, errp);
            if (!ret) {
                return 0;
            }
            error_append_hint(errp,
                              "Is another process using the image [%s]?\n",
                              bs->filename);
        }
        /* fall through to unlock bytes. */
    case RAW_PL_ABORT:
        /* Roll back to the previously held lock set */
        raw_apply_lock_bytes(s, s->fd, s->perm, ~s->shared_perm,
                             true, &local_err);
        if (local_err) {
            /* Theoretically the above call only unlocks bytes and it cannot
             * fail. Something weird happened, report it.
             */
            warn_report_err(local_err);
        }
        break;
    case RAW_PL_COMMIT:
        /* Shrink the held locks to exactly the new permission set */
        raw_apply_lock_bytes(s, s->fd, new_perm, ~new_shared,
                             true, &local_err);
        if (local_err) {
            /* Theoretically the above call only unlocks bytes and it cannot
             * fail. Something weird happened, report it.
             */
            warn_report_err(local_err);
        }
        break;
    }
    return ret;
}
1025244a5668SFam Zheng
1026ad24b679SMarc-André Lureau /* Sets a specific flag */
/*
 * Add @flag to the file status flags of @fd via F_GETFL/F_SETFL.
 * Returns 0 on success, -errno on failure.
 */
static int fcntl_setfl(int fd, int flag)
{
    int old_flags = fcntl(fd, F_GETFL);

    if (old_flags == -1 || fcntl(fd, F_SETFL, old_flags | flag) == -1) {
        return -errno;
    }
    return 0;
}
1040ad24b679SMarc-André Lureau
/*
 * Return an fd whose open flags match what @flags/@perm require, for use by
 * reopen/permission-change code.  Prefers, in order: the existing s->fd when
 * the flags already match (unless @force_dup), a dup + fcntl(F_SETFL) when
 * only fcntl-changeable flags differ, and finally a fresh qemu_open().
 *
 * The computed open(2) flags are stored into *open_flags.  Returns the fd
 * (possibly s->fd itself) on success, -1 with @errp set on failure.
 */
static int raw_reconfigure_getfd(BlockDriverState *bs, int flags,
                                 int *open_flags, uint64_t perm, bool force_dup,
                                 Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int fd = -1;
    int ret;
    /* Any write-like permission means we must open for writing */
    bool has_writers = perm &
        (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_RESIZE);
    /* Flags that can be flipped on an existing fd with F_SETFL */
    int fcntl_flags = O_APPEND | O_NONBLOCK;
#ifdef O_NOATIME
    fcntl_flags |= O_NOATIME;
#endif

    *open_flags = 0;
    if (s->type == FTYPE_CD) {
        *open_flags |= O_NONBLOCK;
    }

    raw_parse_flags(flags, open_flags, has_writers);

#ifdef O_ASYNC
    /* Not all operating systems have O_ASYNC, and those that don't
     * will not let us track the state into rs->open_flags (typically
     * you achieve the same effect with an ioctl, for example I_SETSIG
     * on Solaris). But we do not use O_ASYNC, so that's fine.
     */
    assert((s->open_flags & O_ASYNC) == 0);
#endif

    if (!force_dup && *open_flags == s->open_flags) {
        /* We're lucky, the existing fd is fine */
        return s->fd;
    }

    if ((*open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) {
        /* dup the original fd */
        fd = qemu_dup(s->fd);
        if (fd >= 0) {
            ret = fcntl_setfl(fd, *open_flags);
            if (ret) {
                qemu_close(fd);
                fd = -1;
            }
        }
    }

    /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */
    if (fd == -1) {
        const char *normalized_filename = bs->filename;
        ret = raw_normalize_devicepath(&normalized_filename, errp);
        if (ret >= 0) {
            fd = qemu_open(normalized_filename, *open_flags, errp);
            if (fd == -1) {
                return -1;
            }
        }
    }

    /* Read-write fds must actually be writable (matters for host devices) */
    if (fd != -1 && (*open_flags & O_RDWR)) {
        ret = check_hdev_writable(fd);
        if (ret < 0) {
            qemu_close(fd);
            error_setg_errno(errp, -ret, "The device is not writable");
            return -1;
        }
    }

    return fd;
}
11115cec2870SKevin Wolf
/*
 * .bdrv_reopen_prepare implementation: absorb the few options this driver
 * allows changing on reopen (drop-cache, x-check-cache-dropped) into a
 * BDRVRawReopenState, and hand everything else back to the generic layer,
 * which will complain about unsupported changes.  The fd itself is NOT
 * reconfigured here (see the comment below).
 */
static int raw_reopen_prepare(BDRVReopenState *state,
                              BlockReopenQueue *queue, Error **errp)
{
    BDRVRawState *s;
    BDRVRawReopenState *rs;
    QemuOpts *opts;
    int ret;

    assert(state != NULL);
    assert(state->bs != NULL);

    s = state->bs->opaque;

    state->opaque = g_new0(BDRVRawReopenState, 1);
    rs = state->opaque;

    /* Handle options changes */
    opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, state->options, errp)) {
        ret = -EINVAL;
        goto out;
    }

    rs->drop_cache = qemu_opt_get_bool_del(opts, "drop-cache", true);
    rs->check_cache_dropped =
        qemu_opt_get_bool_del(opts, "x-check-cache-dropped", false);

    /* This driver's reopen function doesn't currently allow changing
     * other options, so let's put them back in the original QDict and
     * bdrv_reopen_prepare() will detect changes and complain. */
    qemu_opts_to_qdict(opts, state->options);

    /*
     * As part of reopen prepare we also want to create new fd by
     * raw_reconfigure_getfd(). But it wants updated "perm", when in
     * bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to
     * permission update. Happily, permission update is always a part
     * (a separate stage) of bdrv_reopen_multiple() so we can rely on this
     * fact and reconfigure fd in raw_check_perm().
     */

    s->reopen_state = state;
    ret = 0;

out:
    qemu_opts_del(opts);
    return ret;
}
1160c1bb86cdSEric Blake
/*
 * .bdrv_reopen_commit implementation: adopt the values staged in prepare
 * into the live BDRVRawState and release the staging structure.
 */
static void raw_reopen_commit(BDRVReopenState *state)
{
    BDRVRawReopenState *staged = state->opaque;
    BDRVRawState *s = state->bs->opaque;

    assert(s->reopen_state == state);
    s->reopen_state = NULL;

    /* Make the prepared settings effective */
    s->drop_cache = staged->drop_cache;
    s->check_cache_dropped = staged->check_cache_dropped;
    s->open_flags = staged->open_flags;

    g_free(staged);
    state->opaque = NULL;
}
1175c1bb86cdSEric Blake
1176c1bb86cdSEric Blake
/*
 * .bdrv_reopen_abort implementation: discard the state staged by prepare,
 * leaving the live BDRVRawState untouched.
 */
static void raw_reopen_abort(BDRVReopenState *state)
{
    BDRVRawReopenState *staged = state->opaque;
    BDRVRawState *s = state->bs->opaque;

    if (!staged) {
        /* Prepare never got far enough to allocate anything */
        return;
    }

    g_free(staged);
    state->opaque = NULL;

    assert(s->reopen_state == state);
    s->reopen_state = NULL;
}
1193c1bb86cdSEric Blake
/*
 * Query the maximum transfer length the host device supports.
 * Returns the limit in bytes, or -errno / -ENOSYS when unavailable.
 */
static int hdev_get_max_hw_transfer(int fd, struct stat *st)
{
#ifdef BLKSECTGET
    if (S_ISBLK(st->st_mode)) {
        /* Block devices: BLKSECTGET yields a count of 512-byte sectors in
         * an unsigned short */
        unsigned short max_sectors = 0;
        if (ioctl(fd, BLKSECTGET, &max_sectors) == 0) {
            return max_sectors * 512;
        }
    } else {
        /* Non-block (presumably SG character) devices: the same ioctl
         * number reports the limit directly in bytes as an int —
         * NOTE(review): confirm against the kernel sg driver docs */
        int max_bytes = 0;
        if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) {
            return max_bytes;
        }
    }
    return -errno;
#else
    return -ENOSYS;
#endif
}
1213c1bb86cdSEric Blake
1214a735b56eSSam Li /*
1215a735b56eSSam Li * Get a sysfs attribute value as character string.
1216a735b56eSSam Li */
1217a735b56eSSam Li #ifdef CONFIG_LINUX
get_sysfs_str_val(struct stat * st,const char * attribute,char ** val)1218a735b56eSSam Li static int get_sysfs_str_val(struct stat *st, const char *attribute,
1219a735b56eSSam Li char **val) {
1220a735b56eSSam Li g_autofree char *sysfspath = NULL;
1221a735b56eSSam Li size_t len;
1222a735b56eSSam Li
1223a735b56eSSam Li if (!S_ISBLK(st->st_mode)) {
1224a735b56eSSam Li return -ENOTSUP;
1225a735b56eSSam Li }
1226a735b56eSSam Li
1227a735b56eSSam Li sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/%s",
1228a735b56eSSam Li major(st->st_rdev), minor(st->st_rdev),
1229a735b56eSSam Li attribute);
123029a242e1SSam Li if (!g_file_get_contents(sysfspath, val, &len, NULL)) {
1231a735b56eSSam Li return -ENOENT;
1232a735b56eSSam Li }
1233a735b56eSSam Li
1234a735b56eSSam Li /* The file is ended with '\n' */
1235a735b56eSSam Li char *p;
1236a735b56eSSam Li p = *val;
1237a735b56eSSam Li if (*(p + len - 1) == '\n') {
1238a735b56eSSam Li *(p + len - 1) = '\0';
1239a735b56eSSam Li }
124029a242e1SSam Li return 0;
1241a735b56eSSam Li }
1242a735b56eSSam Li #endif
1243a735b56eSSam Li
12446d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
/*
 * Translate the sysfs "zoned" attribute of the device backing @st into a
 * BlockZoneModel.  Returns 0 on success with *@zoned set, or a negative
 * errno (including -ENOTSUP for an unrecognized model string).
 */
static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned)
{
    static const struct {
        const char *name;
        BlockZoneModel model;
    } models[] = {
        { "host-managed", BLK_Z_HM },
        { "host-aware",   BLK_Z_HA },
        { "none",         BLK_Z_NONE },
    };
    g_autofree char *val = NULL;
    size_t i;
    int ret;

    ret = get_sysfs_str_val(st, "zoned", &val);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < G_N_ELEMENTS(models); i++) {
        if (strcmp(val, models[i].name) == 0) {
            *zoned = models[i].model;
            return 0;
        }
    }
    return -ENOTSUP;
}
12666d43eaa3SSam Li #endif /* defined(CONFIG_BLKZONED) */
1267a735b56eSSam Li
1268a735b56eSSam Li /*
1269a735b56eSSam Li * Get a sysfs attribute value as a long integer.
1270a735b56eSSam Li */
1271a735b56eSSam Li #ifdef CONFIG_LINUX
get_sysfs_long_val(struct stat * st,const char * attribute)1272a735b56eSSam Li static long get_sysfs_long_val(struct stat *st, const char *attribute)
1273a735b56eSSam Li {
1274a735b56eSSam Li g_autofree char *str = NULL;
1275a735b56eSSam Li const char *end;
1276a735b56eSSam Li long val;
1277a735b56eSSam Li int ret;
1278a735b56eSSam Li
1279a735b56eSSam Li ret = get_sysfs_str_val(st, attribute, &str);
1280a735b56eSSam Li if (ret < 0) {
1281a735b56eSSam Li return ret;
1282a735b56eSSam Li }
1283a735b56eSSam Li
1284a735b56eSSam Li /* The file is ended with '\n', pass 'end' to accept that. */
1285a735b56eSSam Li ret = qemu_strtol(str, &end, 10, &val);
1286a735b56eSSam Li if (ret == 0 && end && *end == '\0') {
1287a735b56eSSam Li ret = val;
1288a735b56eSSam Li }
1289a735b56eSSam Li return ret;
1290a735b56eSSam Li }
1291a735b56eSSam Li #endif
1292a735b56eSSam Li
/*
 * Return the maximum number of scatter/gather segments the host device
 * accepts per request, or a negative errno when it cannot be determined.
 */
static int hdev_get_max_segments(int fd, struct stat *st)
{
#ifdef CONFIG_LINUX
    if (S_ISCHR(st->st_mode)) {
        int tablesize;

        /* SCSI generic (sg) character devices expose the limit via ioctl */
        if (ioctl(fd, SG_GET_SG_TABLESIZE, &tablesize) == 0) {
            return tablesize;
        }
        return -ENOTSUP;
    }
    /* Block devices: read it from sysfs */
    return get_sysfs_long_val(st, "max_segments");
#else
    return -ENOTSUP;
#endif
}
13099103f1ceSFam Zheng
13106d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
1311a3c41f06SSam Li /*
1312a3c41f06SSam Li * If the reset_all flag is true, then the wps of zone whose state is
1313a3c41f06SSam Li * not readonly or offline should be all reset to the start sector.
1314a3c41f06SSam Li * Else, take the real wp of the device.
1315a3c41f06SSam Li */
static int get_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
                        unsigned int nrz, bool reset_all)
{
    struct blk_zone *blkz;
    size_t rep_size;
    uint64_t sector = offset >> BDRV_SECTOR_BITS;
    BlockZoneWps *wps = bs->wps;
    /* j indexes the cached write-pointer array, starting at the zone
     * containing @offset */
    unsigned int j = offset / bs->bl.zone_size;
    unsigned int n = 0, i = 0;
    int ret;
    /* One report header followed by up to @nrz zone descriptors */
    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
    g_autofree struct blk_zone_report *rep = NULL;

    rep = g_malloc(rep_size);
    blkz = (struct blk_zone *)(rep + 1);
    /* The kernel may return fewer zones than asked for, so loop until all
     * @nrz zones have been reported (or the device runs out of zones) */
    while (n < nrz) {
        memset(rep, 0, rep_size);
        rep->sector = sector;
        rep->nr_zones = nrz - n;

        do {
            ret = ioctl(fd, BLKREPORTZONE, rep);
        } while (ret != 0 && errno == EINTR);
        if (ret != 0) {
            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
                         fd, offset, errno);
            return -errno;
        }

        if (!rep->nr_zones) {
            break;
        }

        for (i = 0; i < rep->nr_zones; ++i, ++n, ++j) {
            /*
             * The wp tracking cares only about sequential writes required and
             * sequential write preferred zones so that the wp can advance to
             * the right location.
             * Use the most significant bit of the wp location to indicate the
             * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
             */
            if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
                wps->wp[j] |= 1ULL << 63;
            } else {
                switch(blkz[i].cond) {
                case BLK_ZONE_COND_FULL:
                case BLK_ZONE_COND_READONLY:
                    /* Zone not writable */
                    wps->wp[j] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS;
                    break;
                case BLK_ZONE_COND_OFFLINE:
                    /* Zone not writable nor readable */
                    wps->wp[j] = (blkz[i].start) << BDRV_SECTOR_BITS;
                    break;
                default:
                    if (reset_all) {
                        /* Pretend the zone was reset to its start */
                        wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS;
                    } else {
                        /* Take the device's real write pointer */
                        wps->wp[j] = blkz[i].wp << BDRV_SECTOR_BITS;
                    }
                    break;
                }
            }
        }
        /* Continue reporting right after the last zone we received */
        sector = blkz[i - 1].start + blkz[i - 1].len;
    }

    return 0;
}
1385a3c41f06SSam Li
/*
 * Refresh the cached write pointers for @nrz zones starting at byte
 * @offset, logging (but otherwise ignoring) any failure.
 */
static void update_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
                            unsigned int nrz)
{
    int ret = get_zones_wp(bs, fd, offset, nrz, 0);

    if (ret < 0) {
        error_report("update zone wp failed");
    }
}
1393a3c41f06SSam Li
/*
 * Read the zoned characteristics of the host block device from sysfs
 * (zone model, zone size, number of zones, open/active/append limits)
 * into bs->bl, and cache the current zone write pointers in bs->wps.
 *
 * On any failure the device is treated as non-zoned: bs->bl.zoned is
 * set to BLK_Z_NONE and bs->wps is freed.
 */
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
                                     Error **errp)
{
    BDRVRawState *s = bs->opaque;
    BlockZoneModel zoned;
    int ret;

    ret = get_sysfs_zoned_model(st, &zoned);
    if (ret < 0 || zoned == BLK_Z_NONE) {
        goto no_zoned;
    }
    bs->bl.zoned = zoned;

    /* Optional limits: keep the defaults if sysfs has no value */
    ret = get_sysfs_long_val(st, "max_open_zones");
    if (ret >= 0) {
        bs->bl.max_open_zones = ret;
    }

    ret = get_sysfs_long_val(st, "max_active_zones");
    if (ret >= 0) {
        bs->bl.max_active_zones = ret;
    }

    /*
     * The zoned device must at least have zone size and nr_zones fields.
     */
    ret = get_sysfs_long_val(st, "chunk_sectors");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Unable to read chunk_sectors "
                         "sysfs attribute");
        goto no_zoned;
    } else if (!ret) {
        error_setg(errp, "Read 0 from chunk_sectors sysfs attribute");
        goto no_zoned;
    }
    bs->bl.zone_size = ret << BDRV_SECTOR_BITS;

    ret = get_sysfs_long_val(st, "nr_zones");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Unable to read nr_zones "
                         "sysfs attribute");
        goto no_zoned;
    } else if (!ret) {
        error_setg(errp, "Read 0 from nr_zones sysfs attribute");
        goto no_zoned;
    }
    bs->bl.nr_zones = ret;

    ret = get_sysfs_long_val(st, "zone_append_max_bytes");
    if (ret > 0) {
        bs->bl.max_append_sectors = ret >> BDRV_SECTOR_BITS;
    }

    ret = get_sysfs_long_val(st, "physical_block_size");
    if (ret >= 0) {
        bs->bl.write_granularity = ret;
    }

    /* The refresh_limits() function can be called multiple times. */
    g_free(bs->wps);
    bs->wps = g_malloc(sizeof(BlockZoneWps) +
                       sizeof(int64_t) * bs->bl.nr_zones);
    ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "report wps failed");
        goto no_zoned;
    }
    qemu_co_mutex_init(&bs->wps->colock);
    return;

no_zoned:
    bs->bl.zoned = BLK_Z_NONE;
    g_free(bs->wps);
    bs->wps = NULL;
}
14696d43eaa3SSam Li #else /* !defined(CONFIG_BLKZONED) */
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
                                     Error **errp)
{
    /* Zoned device support is not compiled in: report a non-zoned device */
    bs->bl.zoned = BLK_Z_NONE;
}
14756d43eaa3SSam Li #endif /* !defined(CONFIG_BLKZONED) */
14766d43eaa3SSam Li
/*
 * Probe alignment, memory-alignment and transfer-size limits for the
 * raw file or host device and store them in bs->bl.
 */
static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    struct stat st;

    s->needs_alignment = raw_needs_alignment(bs);
    raw_probe_alignment(bs, s->fd, errp);

    bs->bl.min_mem_alignment = s->buf_align;
    bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());

    /*
     * Maximum transfers are best effort, so it is okay to ignore any
     * errors. That said, based on the man page errors in fstat would be
     * very much unexpected; the only possible case seems to be ENOMEM.
     */
    if (fstat(s->fd, &st)) {
        return;
    }

#if defined(__APPLE__) && (__MACH__)
    struct statfs buf;

    /* On Darwin, take the optimal transfer size from the filesystem */
    if (!fstatfs(s->fd, &buf)) {
        bs->bl.opt_transfer = buf.f_iosize;
        bs->bl.pdiscard_alignment = buf.f_bsize;
    }
#endif

    /* Hardware limits only apply to SCSI generic or block devices */
    if (bdrv_is_sg(bs) || S_ISBLK(st.st_mode)) {
        int ret = hdev_get_max_hw_transfer(s->fd, &st);

        if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) {
            bs->bl.max_hw_transfer = ret;
        }

        ret = hdev_get_max_segments(s->fd, &st);
        if (ret > 0) {
            bs->bl.max_hw_iov = ret;
        }
    }

    raw_refresh_zoned_limits(bs, &st, errp);
}
1521c1bb86cdSEric Blake
/*
 * Check whether @fd refers to an s390 DASD block device.
 * Returns 0 if the BIODASDINFO2 ioctl succeeds (device is a DASD),
 * negative otherwise (including hosts without BIODASDINFO2).
 */
static int check_for_dasd(int fd)
{
#ifdef BIODASDINFO2
    struct dasd_information2_t info = {0};

    return ioctl(fd, BIODASDINFO2, &info);
#else
    return -1;
#endif
}
1532c1bb86cdSEric Blake
1533c1bb86cdSEric Blake /**
1534c1bb86cdSEric Blake * Try to get @bs's logical and physical block size.
1535c1bb86cdSEric Blake * On success, store them in @bsz and return zero.
1536c1bb86cdSEric Blake * On failure, return negative errno.
1537c1bb86cdSEric Blake */
static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    /*
     * Block sizes are only probed for DASD and zoned devices; anything
     * else keeps the defaults.
     */
    if (check_for_dasd(s->fd) < 0 && bs->bl.zoned == BLK_Z_NONE) {
        return -ENOTSUP;
    }

    ret = probe_logical_blocksize(s->fd, &bsz->log);
    if (ret < 0) {
        return ret;
    }

    return probe_physical_blocksize(s->fd, &bsz->phys);
}
1556c1bb86cdSEric Blake
1557c1bb86cdSEric Blake /**
1558c1bb86cdSEric Blake * Try to get @bs's geometry: cyls, heads, sectors.
1559c1bb86cdSEric Blake * On success, store them in @geo and return 0.
1560c1bb86cdSEric Blake * On failure return -errno.
1561c1bb86cdSEric Blake * (Allows block driver to assign default geometry values that guest sees)
1562c1bb86cdSEric Blake */
1563c1bb86cdSEric Blake #ifdef __linux__
static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
{
    BDRVRawState *s = bs->opaque;
    struct hd_geometry ioctl_geo = {0};

    /* If DASD, get its geometry */
    if (check_for_dasd(s->fd) < 0) {
        return -ENOTSUP;
    }
    if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) {
        return -errno;
    }
    /* HDIO_GETGEO may return success even though geo contains zeros
       (e.g. certain multipath setups) */
    if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) {
        return -ENOTSUP;
    }
    /* Do not return a geometry for partition (start is its offset on disk) */
    if (ioctl_geo.start != 0) {
        return -ENOTSUP;
    }
    geo->heads = ioctl_geo.heads;
    geo->sectors = ioctl_geo.sectors;
    geo->cylinders = ioctl_geo.cylinders;

    return 0;
}
1591c1bb86cdSEric Blake #else /* __linux__ */
static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
{
    /* No host mechanism to query disk geometry on non-Linux platforms */
    return -ENOTSUP;
}
1596c1bb86cdSEric Blake #endif
1597c1bb86cdSEric Blake
159803425671SKevin Wolf #if defined(__linux__)
/*
 * Thread-pool worker: issue the ioctl described by @opaque
 * (a RawPosixAIOData), retrying on EINTR.
 * Returns 0 on success, negative errno on failure.
 */
static int handle_aiocb_ioctl(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int ret;

    ret = RETRY_ON_EINTR(
        ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf)
    );
    if (ret == -1) {
        return -errno;
    }

    return 0;
}
161303425671SKevin Wolf #endif /* linux */
1614c1bb86cdSEric Blake
/*
 * Thread-pool worker: flush the host file/device for @opaque
 * (a RawPosixAIOData) with qemu_fdatasync().
 * Returns 0 on success, negative errno on failure.  Once a flush has
 * failed for a writeback-cached file, every later flush fails with the
 * same error (see the comment below on failing fsync() semantics).
 */
static int handle_aiocb_flush(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    BDRVRawState *s = aiocb->bs->opaque;
    int ret;

    if (s->page_cache_inconsistent) {
        return -s->page_cache_inconsistent;
    }

    ret = qemu_fdatasync(aiocb->aio_fildes);
    if (ret == -1) {
        trace_file_flush_fdatasync_failed(errno);

        /* There is no clear definition of the semantics of a failing fsync(),
         * so we may have to assume the worst. The sad truth is that this
         * assumption is correct for Linux. Some pages are now probably marked
         * clean in the page cache even though they are inconsistent with the
         * on-disk contents. The next fdatasync() call would succeed, but no
         * further writeback attempt will be made. We can't get back to a state
         * in which we know what is on disk (we would have to rewrite
         * everything that was touched since the last fdatasync() at least), so
         * make bdrv_flush() fail permanently. Given that the behaviour isn't
         * really defined, I have little hope that other OSes are doing better.
         *
         * Obviously, this doesn't affect O_DIRECT, which bypasses the page
         * cache. */
        if ((s->open_flags & O_DIRECT) == 0) {
            s->page_cache_inconsistent = errno;
        }
        return -errno;
    }
    return 0;
}
1649c1bb86cdSEric Blake
#ifdef CONFIG_PREADV

/*
 * Host libc provides preadv/pwritev.  Start optimistic; the flag is
 * cleared at runtime if the call turns out to return -ENOSYS.
 */
static bool preadv_present = true;

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

/*
 * No compile-time preadv/pwritev support: the stubs fail with -ENOSYS
 * so callers fall back to linearized pread/pwrite.
 */
static bool preadv_present = false;

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif
1683c1bb86cdSEric Blake
/*
 * Perform the vectored I/O described by @aiocb in a single
 * preadv()/pwritev() call, retrying on EINTR.  Writes and zone appends
 * use pwritev(); everything else uses preadv().
 * Returns the number of bytes transferred, or -errno (including -ENOSYS
 * when the host lacks preadv/pwritev).
 */
static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb)
{
    ssize_t len;

    len = RETRY_ON_EINTR(
        (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) ?
            qemu_pwritev(aiocb->aio_fildes,
                         aiocb->io.iov,
                         aiocb->io.niov,
                         aiocb->aio_offset) :
            qemu_preadv(aiocb->aio_fildes,
                        aiocb->io.iov,
                        aiocb->io.niov,
                        aiocb->aio_offset)
    );

    if (len == -1) {
        return -errno;
    }
    return len;
}
1705c1bb86cdSEric Blake
/*
 * Read/writes the data to/from a given linear buffer.
 *
 * Returns the number of bytes handled or -errno in case of an error. Short
 * reads are only returned if the end of the file is reached.
 */
static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf)
{
    ssize_t offset = 0;
    ssize_t len;

    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) {
            len = pwrite(aiocb->aio_fildes,
                         (const char *)buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        } else {
            len = pread(aiocb->aio_fildes,
                        buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);
        }
        if (len == -1 && errno == EINTR) {
            /* Interrupted by a signal: retry the same chunk */
            continue;
        } else if (len == -1 && errno == EINVAL &&
                   (aiocb->bs->open_flags & BDRV_O_NOCACHE) &&
                   !(aiocb->aio_type & QEMU_AIO_WRITE) &&
                   offset > 0) {
            /* O_DIRECT pread() may fail with EINVAL when offset is unaligned
             * after a short read. Assume that O_DIRECT short reads only occur
             * at EOF. Therefore this is a short read, not an I/O error.
             */
            break;
        } else if (len == -1) {
            offset = -errno;
            break;
        } else if (len == 0) {
            /* EOF: report the short transfer accumulated so far */
            break;
        }
        offset += len;
    }

    return offset;
}
1751c1bb86cdSEric Blake
/*
 * Thread-pool worker: perform the vectored I/O described by @opaque
 * (a RawPosixAIOData).  Aligned requests use preadv/pwritev when
 * available; misaligned or unsupported cases are linearized through a
 * bounce buffer and plain pread/pwrite.
 * Returns 0 on success (short reads are zero-padded to the requested
 * length), -EINVAL for a short write, or another negative errno.
 */
static int handle_aiocb_rw(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    ssize_t nbytes;
    char *buf;

    if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->io.niov == 1) {
            nbytes = handle_aiocb_rw_linear(aiocb, aiocb->io.iov->iov_base);
            goto out;
        }
        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes ||
                (nbytes < 0 && nbytes != -ENOSYS)) {
                goto out;
            }
            /* The syscall is missing on this host: never try it again */
            preadv_present = false;
        }

        /*
         * XXX(hch): short read/write. no easy way to handle the reminder
         * using these interfaces. For now retry using plain
         * pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_try_blockalign(aiocb->bs, aiocb->aio_nbytes);
    if (buf == NULL) {
        nbytes = -ENOMEM;
        goto out;
    }

    if (aiocb->aio_type & QEMU_AIO_WRITE) {
        char *p = buf;
        int i;

        /* Gather the scattered source buffers into the bounce buffer */
        for (i = 0; i < aiocb->io.niov; ++i) {
            memcpy(p, aiocb->io.iov[i].iov_base, aiocb->io.iov[i].iov_len);
            p += aiocb->io.iov[i].iov_len;
        }
        assert(p - buf == aiocb->aio_nbytes);
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (!(aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))) {
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        /* Scatter the bounce buffer back into the destination iovec */
        for (i = 0; i < aiocb->io.niov && count; ++i) {
            copy = count;
            if (copy > aiocb->io.iov[i].iov_len) {
                copy = aiocb->io.iov[i].iov_len;
            }
            memcpy(aiocb->io.iov[i].iov_base, p, copy);
            assert(count >= copy);
            p += copy;
            count -= copy;
        }
        assert(count == 0);
    }
    qemu_vfree(buf);

out:
    if (nbytes == aiocb->aio_nbytes) {
        return 0;
    } else if (nbytes >= 0 && nbytes < aiocb->aio_nbytes) {
        if (aiocb->aio_type & QEMU_AIO_WRITE) {
            /* Short writes are errors for the caller */
            return -EINVAL;
        } else {
            /* Short read at EOF: pad the rest of the request with zeroes */
            iov_memset(aiocb->io.iov, aiocb->io.niov, nbytes,
                       0, aiocb->aio_nbytes - nbytes);
            return 0;
        }
    } else {
        assert(nbytes < 0);
        return nbytes;
    }
}
1846c1bb86cdSEric Blake
18470dfc7af2SAkihiko Odaki #if defined(CONFIG_FALLOCATE) || defined(BLKZEROOUT) || defined(BLKDISCARD)
/*
 * Squash the various "operation not supported" errno values a kernel,
 * driver or filesystem may report into a single -ENOTSUP; every other
 * error code is passed through unchanged.
 */
static int translate_err(int err)
{
    switch (err) {
    case -ENODEV:
    case -ENOSYS:
    case -EOPNOTSUPP:
    case -ENOTTY:
        return -ENOTSUP;
    default:
        return err;
    }
}
18560dfc7af2SAkihiko Odaki #endif
1857c1bb86cdSEric Blake
1858c1bb86cdSEric Blake #ifdef CONFIG_FALLOCATE
/*
 * Call fallocate(), retrying while it is interrupted by signals.
 * Returns 0 on success, otherwise a negative errno normalized through
 * translate_err().
 */
static int do_fallocate(int fd, int mode, off_t offset, off_t len)
{
    int ret;

    do {
        ret = fallocate(fd, mode, offset, len);
    } while (ret != 0 && errno == EINTR);

    return ret == 0 ? 0 : translate_err(-errno);
}
1868c1bb86cdSEric Blake #endif
1869c1bb86cdSEric Blake
/*
 * Write zeroes to a host block device with the BLKZEROOUT ioctl.
 * Returns 0 on success, -ENOTSUP when the ioctl is unavailable,
 * disabled, or must be skipped (slow fallbacks forbidden), or another
 * negative errno on failure.
 */
static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
{
    int ret = -ENOTSUP;
    BDRVRawState *s = aiocb->bs->opaque;

    if (!s->has_write_zeroes) {
        return -ENOTSUP;
    }

#ifdef BLKZEROOUT
    /* The BLKZEROOUT implementation in the kernel doesn't set
     * BLKDEV_ZERO_NOFALLBACK, so we can't call this if we have to avoid slow
     * fallbacks. */
    if (!(aiocb->aio_type & QEMU_AIO_NO_FALLBACK)) {
        do {
            uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
            if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
                return 0;
            }
        } while (errno == EINTR);

        ret = translate_err(-errno);
        if (ret == -ENOTSUP) {
            /* Remember the failure so the ioctl is not retried next time */
            s->has_write_zeroes = false;
        }
    }
#endif

    return ret;
}
1900c1bb86cdSEric Blake
/*
 * Thread-pool worker: write zeroes to the byte range described by
 * @opaque (a RawPosixAIOData) without necessarily deallocating it.
 * Tries, in order: BLKZEROOUT (block devices), FALLOC_FL_ZERO_RANGE,
 * punch-hole followed by re-allocation, and plain fallocate() when the
 * range extends the file.
 * Returns 0 on success, -ENOTSUP if no mechanism worked, or another
 * negative errno.
 */
static int handle_aiocb_write_zeroes(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
#ifdef CONFIG_FALLOCATE
    BDRVRawState *s = aiocb->bs->opaque;
    int64_t len;
#endif

    if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
        return handle_aiocb_write_zeroes_block(aiocb);
    }

#ifdef CONFIG_FALLOCATE_ZERO_RANGE
    if (s->has_write_zeroes) {
        int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
                               aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == -ENOTSUP) {
            s->has_write_zeroes = false;
        } else if (ret == 0 || ret != -EINVAL) {
            return ret;
        }
        /*
         * Note: Some file systems do not like unaligned byte ranges, and
         * return EINVAL in such a case, though they should not do it according
         * to the man-page of fallocate(). Thus we simply ignore this return
         * value and try the other fallbacks instead.
         */
    }
#endif

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    if (s->has_discard && s->has_fallocate) {
        int ret = do_fallocate(s->fd,
                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                               aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == 0) {
            /* Hole punched: re-allocate so the range reads as zeroes */
            ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
            if (ret == 0 || ret != -ENOTSUP) {
                return ret;
            }
            s->has_fallocate = false;
        } else if (ret == -EINVAL) {
            /*
             * Some file systems like older versions of GPFS do not like un-
             * aligned byte ranges, and return EINVAL in such a case, though
             * they should not do it according to the man-page of fallocate().
             * Warn about the bad filesystem and try the final fallback instead.
             */
            warn_report_once("Your file system is misbehaving: "
                             "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. "
                             "Please report this bug to your file system "
                             "vendor.");
        } else if (ret != -ENOTSUP) {
            return ret;
        } else {
            s->has_discard = false;
        }
    }
#endif

#ifdef CONFIG_FALLOCATE
    /* Last resort: we are trying to extend the file with zeroed data. This
     * can be done via fallocate(fd, 0) */
    len = raw_getlength(aiocb->bs);
    if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
        int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
        if (ret == 0 || ret != -ENOTSUP) {
            return ret;
        }
        s->has_fallocate = false;
    }
#endif

    return -ENOTSUP;
}
1976c1bb86cdSEric Blake
/*
 * Thread-pool worker: write zeroes to the byte range described by
 * @opaque (a RawPosixAIOData), deallocating (unmapping) it when
 * possible.  Falls back to handle_aiocb_write_zeroes() if unmapping
 * cannot guarantee the range reads back as zeroes.
 */
static int handle_aiocb_write_zeroes_unmap(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    BDRVRawState *s G_GNUC_UNUSED = aiocb->bs->opaque;

    /* First try to write zeros and unmap at the same time */

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                           aiocb->aio_offset, aiocb->aio_nbytes);
    switch (ret) {
    case -ENOTSUP:
    case -EINVAL:
    case -EBUSY:
        /* Punch-hole refused: fall through to the plain zero-write path */
        break;
    default:
        return ret;
    }
#endif

    /* If we couldn't manage to unmap while guaranteed that the area reads as
     * all-zero afterwards, just write zeroes without unmapping */
    return handle_aiocb_write_zeroes(aiocb);
}
200134fa110eSKevin Wolf
#ifndef HAVE_COPY_FILE_RANGE
/*
 * Fallback copy_file_range() for libcs without a wrapper: issue the raw
 * syscall when the kernel headers define its number, otherwise fail with
 * ENOSYS so callers take their emulation path.
 */
static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
                             off_t *out_off, size_t len, unsigned int flags)
{
#ifndef __NR_copy_file_range
    errno = ENOSYS;
    return -1;
#else
    return syscall(__NR_copy_file_range, in_fd, in_off, out_fd,
                   out_off, len, flags);
#endif
}
#endif
20151efad060SFam Zheng
20166d43eaa3SSam Li /*
20176d43eaa3SSam Li * parse_zone - Fill a zone descriptor
20186d43eaa3SSam Li */
#if defined(CONFIG_BLKZONED)
/*
 * parse_zone - Translate one kernel struct blk_zone into the block
 * layer's BlockZoneDescriptor.
 *
 * Returns 0 on success, or -ENOTSUP if the kernel reports a zone type
 * or condition this code does not know about.
 */
static inline int parse_zone(struct BlockZoneDescriptor *zone,
                             const struct blk_zone *blkz) {
    /* Kernel zone geometry is in 512-byte sectors; convert to bytes */
    zone->start = blkz->start << BDRV_SECTOR_BITS;
    zone->length = blkz->len << BDRV_SECTOR_BITS;
    zone->wp = blkz->wp << BDRV_SECTOR_BITS;

#ifdef HAVE_BLK_ZONE_REP_CAPACITY
    zone->cap = blkz->capacity << BDRV_SECTOR_BITS;
#else
    /* Older kernel headers lack the capacity field; assume cap == len */
    zone->cap = blkz->len << BDRV_SECTOR_BITS;
#endif

    /* Map the kernel zone type onto the block layer enum */
    switch (blkz->type) {
    case BLK_ZONE_TYPE_SEQWRITE_REQ:
        zone->type = BLK_ZT_SWR;
        break;
    case BLK_ZONE_TYPE_SEQWRITE_PREF:
        zone->type = BLK_ZT_SWP;
        break;
    case BLK_ZONE_TYPE_CONVENTIONAL:
        zone->type = BLK_ZT_CONV;
        break;
    default:
        error_report("Unsupported zone type: 0x%x", blkz->type);
        return -ENOTSUP;
    }

    /* Map the kernel zone condition onto the block layer zone state */
    switch (blkz->cond) {
    case BLK_ZONE_COND_NOT_WP:
        zone->state = BLK_ZS_NOT_WP;
        break;
    case BLK_ZONE_COND_EMPTY:
        zone->state = BLK_ZS_EMPTY;
        break;
    case BLK_ZONE_COND_IMP_OPEN:
        zone->state = BLK_ZS_IOPEN;
        break;
    case BLK_ZONE_COND_EXP_OPEN:
        zone->state = BLK_ZS_EOPEN;
        break;
    case BLK_ZONE_COND_CLOSED:
        zone->state = BLK_ZS_CLOSED;
        break;
    case BLK_ZONE_COND_READONLY:
        zone->state = BLK_ZS_RDONLY;
        break;
    case BLK_ZONE_COND_FULL:
        zone->state = BLK_ZS_FULL;
        break;
    case BLK_ZONE_COND_OFFLINE:
        zone->state = BLK_ZS_OFFLINE;
        break;
    default:
        error_report("Unsupported zone state: 0x%x", blkz->cond);
        return -ENOTSUP;
    }
    return 0;
}
#endif
20796d43eaa3SSam Li
#if defined(CONFIG_BLKZONED)
/*
 * Worker for zone report requests: fill aiocb->zone_report.zones with up
 * to *aiocb->zone_report.nr_zones descriptors, starting at the zone that
 * contains aiocb->aio_offset.  Issues BLKREPORTZONE ioctls repeatedly
 * until the requested count is reached or the device has no more zones.
 * On success, *nr_zones is updated to the number actually reported.
 *
 * Returns 0 on success, -errno on ioctl failure, or -ENOTSUP if a zone
 * cannot be parsed.
 */
static int handle_aiocb_zone_report(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int fd = aiocb->aio_fildes;
    unsigned int *nr_zones = aiocb->zone_report.nr_zones;
    BlockZoneDescriptor *zones = aiocb->zone_report.zones;
    /* zoned block devices use 512-byte sectors */
    uint64_t sector = aiocb->aio_offset / 512;

    struct blk_zone *blkz;
    size_t rep_size;
    unsigned int nrz;
    int ret;
    unsigned int n = 0, i = 0;

    nrz = *nr_zones;
    /* One buffer holds the report header plus nrz zone entries */
    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
    g_autofree struct blk_zone_report *rep = NULL;
    rep = g_malloc(rep_size);

    /* Zone entries follow the header directly in the kernel's layout */
    blkz = (struct blk_zone *)(rep + 1);
    while (n < nrz) {
        memset(rep, 0, rep_size);
        rep->sector = sector;
        rep->nr_zones = nrz - n;

        /* Retry the ioctl if interrupted by a signal */
        do {
            ret = ioctl(fd, BLKREPORTZONE, rep);
        } while (ret != 0 && errno == EINTR);
        if (ret != 0) {
            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
                         fd, sector, errno);
            return -errno;
        }

        /* The kernel returned no further zones; we are done */
        if (!rep->nr_zones) {
            break;
        }

        for (i = 0; i < rep->nr_zones; i++, n++) {
            ret = parse_zone(&zones[n], &blkz[i]);
            if (ret != 0) {
                return ret;
            }

            /* The next report should start after the last zone reported */
            sector = blkz[i].start + blkz[i].len;
        }
    }

    *nr_zones = n;
    return 0;
}
#endif
21356d43eaa3SSam Li
#if defined(CONFIG_BLKZONED)
/*
 * Worker for zone management requests: forward the requested zone ioctl
 * (aiocb->zone_mgmt.op) to the kernel for the byte range described by
 * aio_offset/aio_nbytes, expressed in 512-byte sectors as the zoned
 * block API requires.
 *
 * Returns the ioctl result, or -errno on failure.
 */
static int handle_aiocb_zone_mgmt(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int fd = aiocb->aio_fildes;
    struct blk_zone_range range = {
        .sector = aiocb->aio_offset / 512,
        .nr_sectors = aiocb->aio_nbytes / 512,
    };
    int ret;

    /* Retry if the ioctl is interrupted by a signal */
    do {
        ret = ioctl(fd, aiocb->zone_mgmt.op, &range);
    } while (ret != 0 && errno == EINTR);

    return ret < 0 ? -errno : ret;
}
#endif
21566d43eaa3SSam Li
handle_aiocb_copy_range(void * opaque)215758a209c4SKevin Wolf static int handle_aiocb_copy_range(void *opaque)
21581efad060SFam Zheng {
215958a209c4SKevin Wolf RawPosixAIOData *aiocb = opaque;
21601efad060SFam Zheng uint64_t bytes = aiocb->aio_nbytes;
21611efad060SFam Zheng off_t in_off = aiocb->aio_offset;
2162d57c44d0SKevin Wolf off_t out_off = aiocb->copy_range.aio_offset2;
21631efad060SFam Zheng
21641efad060SFam Zheng while (bytes) {
21651efad060SFam Zheng ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off,
2166d57c44d0SKevin Wolf aiocb->copy_range.aio_fd2, &out_off,
21671efad060SFam Zheng bytes, 0);
2168ecc983a5SFam Zheng trace_file_copy_file_range(aiocb->bs, aiocb->aio_fildes, in_off,
2169d57c44d0SKevin Wolf aiocb->copy_range.aio_fd2, out_off, bytes,
2170d57c44d0SKevin Wolf 0, ret);
2171c436e3d0SFam Zheng if (ret == 0) {
2172c436e3d0SFam Zheng /* No progress (e.g. when beyond EOF), let the caller fall back to
2173c436e3d0SFam Zheng * buffer I/O. */
2174c436e3d0SFam Zheng return -ENOSPC;
21751efad060SFam Zheng }
21761efad060SFam Zheng if (ret < 0) {
2177c436e3d0SFam Zheng switch (errno) {
2178c436e3d0SFam Zheng case ENOSYS:
21791efad060SFam Zheng return -ENOTSUP;
2180c436e3d0SFam Zheng case EINTR:
2181c436e3d0SFam Zheng continue;
2182c436e3d0SFam Zheng default:
21831efad060SFam Zheng return -errno;
21841efad060SFam Zheng }
21851efad060SFam Zheng }
21861efad060SFam Zheng bytes -= ret;
21871efad060SFam Zheng }
21881efad060SFam Zheng return 0;
21891efad060SFam Zheng }
21901efad060SFam Zheng
/*
 * Worker for discard requests: deallocate the byte range
 * [aio_offset, aio_offset + aio_nbytes) of the file or device.
 *
 * Host block devices use the BLKDISCARD ioctl; regular files use
 * fallocate(FALLOC_FL_PUNCH_HOLE) on Linux or fcntl(F_PUNCHHOLE) on
 * macOS.  If the host signals lack of support, s->has_discard is
 * cleared so subsequent requests fail fast with -ENOTSUP.
 */
static int handle_aiocb_discard(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int ret = -ENOTSUP;
    BDRVRawState *s = aiocb->bs->opaque;

    if (!s->has_discard) {
        return -ENOTSUP;
    }

    if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
#ifdef BLKDISCARD
        /* Retry the ioctl if interrupted by a signal */
        do {
            uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
            if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) {
                return 0;
            }
        } while (errno == EINTR);

        ret = translate_err(-errno);
#endif
    } else {
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
        ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                           aiocb->aio_offset, aiocb->aio_nbytes);
        ret = translate_err(ret);
#elif defined(__APPLE__) && (__MACH__)
        fpunchhole_t fpunchhole;
        fpunchhole.fp_flags = 0;
        fpunchhole.reserved = 0;
        fpunchhole.fp_offset = aiocb->aio_offset;
        fpunchhole.fp_length = aiocb->aio_nbytes;
        if (fcntl(s->fd, F_PUNCHHOLE, &fpunchhole) == -1) {
            /* ENODEV means the filesystem cannot punch holes */
            ret = errno == ENODEV ? -ENOTSUP : -errno;
        } else {
            ret = 0;
        }
#endif
    }

    if (ret == -ENOTSUP) {
        /* Remember the failure so later requests skip the syscall */
        s->has_discard = false;
    }
    return ret;
}
2236c1bb86cdSEric Blake
22373a20013fSNir Soffer /*
22383a20013fSNir Soffer * Help alignment probing by allocating the first block.
22393a20013fSNir Soffer *
22403a20013fSNir Soffer * When reading with direct I/O from unallocated area on Gluster backed by XFS,
22413a20013fSNir Soffer * reading succeeds regardless of request length. In this case we fallback to
22423a20013fSNir Soffer * safe alignment which is not optimal. Allocating the first block avoids this
22433a20013fSNir Soffer * fallback.
22443a20013fSNir Soffer *
22453a20013fSNir Soffer * fd may be opened with O_DIRECT, but we don't know the buffer alignment or
22463a20013fSNir Soffer * request alignment, so we use safe values.
22473a20013fSNir Soffer *
22483a20013fSNir Soffer * Returns: 0 on success, -errno on failure. Since this is an optimization,
22493a20013fSNir Soffer * caller may ignore failures.
22503a20013fSNir Soffer */
allocate_first_block(int fd,size_t max_size)22513a20013fSNir Soffer static int allocate_first_block(int fd, size_t max_size)
22523a20013fSNir Soffer {
22533a20013fSNir Soffer size_t write_size = (max_size < MAX_BLOCKSIZE)
22543a20013fSNir Soffer ? BDRV_SECTOR_SIZE
22553a20013fSNir Soffer : MAX_BLOCKSIZE;
22568e3b0cbbSMarc-André Lureau size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
22573a20013fSNir Soffer void *buf;
22583a20013fSNir Soffer ssize_t n;
22593a20013fSNir Soffer int ret;
22603a20013fSNir Soffer
22613a20013fSNir Soffer buf = qemu_memalign(max_align, write_size);
22623a20013fSNir Soffer memset(buf, 0, write_size);
22633a20013fSNir Soffer
226437b0b24eSNikita Ivanov n = RETRY_ON_EINTR(pwrite(fd, buf, write_size, 0));
22653a20013fSNir Soffer
22663a20013fSNir Soffer ret = (n == -1) ? -errno : 0;
22673a20013fSNir Soffer
22683a20013fSNir Soffer qemu_vfree(buf);
22693a20013fSNir Soffer return ret;
22703a20013fSNir Soffer }
22713a20013fSNir Soffer
/*
 * Worker for truncate requests: resize the regular file @aio_fildes to
 * aio_offset bytes, filling any new space according to
 * aiocb->truncate.prealloc.
 *
 * Shrinking with preallocation enabled is rejected with -ENOTSUP.  On a
 * failed grow, the original length is restored on a best-effort basis.
 *
 * Returns 0 on success, -errno / -ENOTSUP on failure (details in
 * aiocb->truncate.errp).
 */
static int handle_aiocb_truncate(void *opaque)
{
    RawPosixAIOData *aiocb = opaque;
    int result = 0;
    int64_t current_length = 0;
    char *buf = NULL;
    struct stat st;
    int fd = aiocb->aio_fildes;
    int64_t offset = aiocb->aio_offset;
    PreallocMode prealloc = aiocb->truncate.prealloc;
    Error **errp = aiocb->truncate.errp;

    if (fstat(fd, &st) < 0) {
        result = -errno;
        error_setg_errno(errp, -result, "Could not stat file");
        return result;
    }

    current_length = st.st_size;
    /* Preallocation modes only make sense when the file grows */
    if (current_length > offset && prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Cannot use preallocation for shrinking files");
        return -ENOTSUP;
    }

    switch (prealloc) {
#ifdef CONFIG_POSIX_FALLOCATE
    case PREALLOC_MODE_FALLOC:
        /*
         * Truncating before posix_fallocate() makes it about twice slower on
         * file systems that do not support fallocate(), trying to check if a
         * block is allocated before allocating it, so don't do that here.
         */
        if (offset != current_length) {
            result = -posix_fallocate(fd, current_length,
                                      offset - current_length);
            if (result != 0) {
                /* posix_fallocate() doesn't set errno. */
                error_setg_errno(errp, -result,
                                 "Could not preallocate new data");
            } else if (current_length == 0) {
                /*
                 * posix_fallocate() uses fallocate() if the filesystem
                 * supports it, or fallback to manually writing zeroes. If
                 * fallocate() was used, unaligned reads from the fallocated
                 * area in raw_probe_alignment() will succeed, hence we need to
                 * allocate the first block.
                 *
                 * Optimize future alignment probing; ignore failures.
                 */
                allocate_first_block(fd, offset);
            }
        } else {
            result = 0;
        }
        goto out;
#endif
    case PREALLOC_MODE_FULL:
    {
        int64_t num = 0, left = offset - current_length;
        off_t seek_result;

        /*
         * Knowing the final size from the beginning could allow the file
         * system driver to do less allocations and possibly avoid
         * fragmentation of the file.
         */
        if (ftruncate(fd, offset) != 0) {
            result = -errno;
            error_setg_errno(errp, -result, "Could not resize file");
            goto out;
        }

        /* Write the grown range in 64 KiB chunks of zeroes */
        buf = g_malloc0(65536);

        seek_result = lseek(fd, current_length, SEEK_SET);
        if (seek_result < 0) {
            result = -errno;
            error_setg_errno(errp, -result,
                             "Failed to seek to the old end of file");
            goto out;
        }

        while (left > 0) {
            num = MIN(left, 65536);
            result = write(fd, buf, num);
            if (result < 0) {
                /* Retry interrupted writes; fail on anything else */
                if (errno == EINTR) {
                    continue;
                }
                result = -errno;
                error_setg_errno(errp, -result,
                                 "Could not write zeros for preallocation");
                goto out;
            }
            left -= result;
        }
        if (result >= 0) {
            /* Make the fully-written zeroes durable */
            result = fsync(fd);
            if (result < 0) {
                result = -errno;
                error_setg_errno(errp, -result,
                                 "Could not flush file to disk");
                goto out;
            }
        }
        goto out;
    }
    case PREALLOC_MODE_OFF:
        if (ftruncate(fd, offset) != 0) {
            result = -errno;
            error_setg_errno(errp, -result, "Could not resize file");
        } else if (current_length == 0 && offset > current_length) {
            /* Optimize future alignment probing; ignore failures. */
            allocate_first_block(fd, offset);
        }
        return result;
    default:
        result = -ENOTSUP;
        error_setg(errp, "Unsupported preallocation mode: %s",
                   PreallocMode_str(prealloc));
        return result;
    }

out:
    /* On failure, try to undo the resize; report (but don't fail on)
     * problems restoring the old length. */
    if (result < 0) {
        if (ftruncate(fd, current_length) < 0) {
            error_report("Failed to restore old file length: %s",
                         strerror(errno));
        }
    }

    g_free(buf);
    return result;
}
240693f4e2ffSKevin Wolf
/*
 * Run @func(@arg) in the worker thread pool, yielding the current
 * coroutine until it completes; returns @func's result.
 */
static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg)
{
    return thread_pool_submit_co(func, arg);
}
24115d5de250SKevin Wolf
2412a7c5f67aSKeith Busch /*
2413a7c5f67aSKeith Busch * Check if all memory in this vector is sector aligned.
2414a7c5f67aSKeith Busch */
bdrv_qiov_is_aligned(BlockDriverState * bs,QEMUIOVector * qiov)2415a7c5f67aSKeith Busch static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2416a7c5f67aSKeith Busch {
2417a7c5f67aSKeith Busch int i;
2418a7c5f67aSKeith Busch size_t alignment = bdrv_min_mem_align(bs);
241925474d90SKeith Busch size_t len = bs->bl.request_alignment;
2420a7c5f67aSKeith Busch IO_CODE();
2421a7c5f67aSKeith Busch
2422a7c5f67aSKeith Busch for (i = 0; i < qiov->niov; i++) {
2423a7c5f67aSKeith Busch if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2424a7c5f67aSKeith Busch return false;
2425a7c5f67aSKeith Busch }
242625474d90SKeith Busch if (qiov->iov[i].iov_len % len) {
2427a7c5f67aSKeith Busch return false;
2428a7c5f67aSKeith Busch }
2429a7c5f67aSKeith Busch }
2430a7c5f67aSKeith Busch
2431a7c5f67aSKeith Busch return true;
2432a7c5f67aSKeith Busch }
2433a7c5f67aSKeith Busch
#ifdef CONFIG_LINUX_IO_URING
/*
 * Return true if io_uring can be used for this request.  If setting up
 * io_uring in the current AioContext fails, report the error once and
 * permanently disable io_uring for this BDS so requests fall back to
 * the thread pool.
 */
static inline bool raw_check_linux_io_uring(BDRVRawState *s)
{
    if (s->use_linux_io_uring) {
        Error *local_err = NULL;
        AioContext *ctx = qemu_get_current_aio_context();

        if (likely(aio_setup_linux_io_uring(ctx, &local_err))) {
            return true;
        }
        error_reportf_err(local_err, "Unable to use linux io_uring, "
                                     "falling back to thread pool: ");
        s->use_linux_io_uring = false;
    }
    return false;
}
#endif
2454*cd0c0db0SStefan Hajnoczi
#ifdef CONFIG_LINUX_AIO
/*
 * Return true if Linux AIO can be used for this request.  If setting up
 * Linux AIO in the current AioContext fails, report the error once and
 * permanently disable it for this BDS so requests fall back to the
 * thread pool.
 */
static inline bool raw_check_linux_aio(BDRVRawState *s)
{
    if (s->use_linux_aio) {
        Error *local_err = NULL;
        AioContext *ctx = qemu_get_current_aio_context();

        if (likely(aio_setup_linux_aio(ctx, &local_err))) {
            return true;
        }
        error_reportf_err(local_err, "Unable to use Linux AIO, "
                                     "falling back to thread pool: ");
        s->use_linux_aio = false;
    }
    return false;
}
#endif
2475*cd0c0db0SStefan Hajnoczi
/*
 * Common read/write/zone-append submission path.
 *
 * @offset_ptr is in-out: for QEMU_AIO_ZONE_APPEND requests on zoned
 * devices, the request is redirected to the zone's current write
 * pointer, and the resulting position is written back through
 * @offset_ptr on success.  Requests go to io_uring or Linux AIO when
 * enabled and the buffers are suitably aligned, otherwise to the
 * thread pool.
 */
static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
                                   uint64_t bytes, QEMUIOVector *qiov, int type)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int ret;
    uint64_t offset = *offset_ptr;

    if (fd_open(bs) < 0)
        return -EIO;
#if defined(CONFIG_BLKZONED)
    /* Serialize writes on zoned devices and resolve the append position
     * from the cached per-zone write pointer */
    if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
        bs->bl.zoned != BLK_Z_NONE) {
        qemu_co_mutex_lock(&bs->wps->colock);
        if (type & QEMU_AIO_ZONE_APPEND) {
            int index = offset / bs->bl.zone_size;
            offset = bs->wps->wp[index];
        }
    }
#endif

    /*
     * When using O_DIRECT, the request must be aligned to be able to use
     * either libaio or io_uring interface. If not fail back to regular thread
     * pool read/write code which emulates this for us if we
     * set QEMU_AIO_MISALIGNED.
     */
    if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) {
        type |= QEMU_AIO_MISALIGNED;
#ifdef CONFIG_LINUX_IO_URING
    } else if (raw_check_linux_io_uring(s)) {
        assert(qiov->size == bytes);
        ret = luring_co_submit(bs, s->fd, offset, qiov, type);
        goto out;
#endif
#ifdef CONFIG_LINUX_AIO
    } else if (raw_check_linux_aio(s)) {
        assert(qiov->size == bytes);
        ret = laio_co_submit(s->fd, offset, qiov, type,
                             s->aio_max_batch);
        goto out;
#endif
    }

    /* Fallback: emulate the request in the thread pool */
    acb = (RawPosixAIOData) {
        .bs             = bs,
        .aio_fildes     = s->fd,
        .aio_type       = type,
        .aio_offset     = offset,
        .aio_nbytes     = bytes,
        .io             = {
            .iov            = qiov->iov,
            .niov           = qiov->niov,
        },
    };

    assert(qiov->size == bytes);
    ret = raw_thread_pool_submit(handle_aiocb_rw, &acb);
    goto out; /* Avoid the compiler err of unused label */

out:
#if defined(CONFIG_BLKZONED)
    /* Update the cached write pointers and release the zone lock */
    if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
        bs->bl.zoned != BLK_Z_NONE) {
        BlockZoneWps *wps = bs->wps;
        if (ret == 0) {
            uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
            if (!BDRV_ZT_IS_CONV(*wp)) {
                if (type & QEMU_AIO_ZONE_APPEND) {
                    /* Report where the appended data actually landed */
                    *offset_ptr = *wp;
                    trace_zbd_zone_append_complete(bs, *offset_ptr
                        >> BDRV_SECTOR_BITS);
                }
                /* Advance the wp if needed */
                if (offset + bytes > *wp) {
                    *wp = offset + bytes;
                }
            }
        } else {
            /*
             * write and append write are not allowed to cross zone boundaries
             */
            update_zones_wp(bs, s->fd, offset, 1);
        }

        qemu_co_mutex_unlock(&wps->colock);
    }
#endif
    return ret;
}
2566c1bb86cdSEric Blake
/*
 * Read @bytes at @offset into @qiov.  Thin wrapper over raw_co_prw();
 * the offset is passed by address, but raw_co_prw() only rewrites it
 * for zone append requests, never for reads.
 */
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, QEMUIOVector *qiov,
                                      BdrvRequestFlags flags)
{
    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ);
}
2573c1bb86cdSEric Blake
/*
 * Write @bytes from @qiov at @offset.  Thin wrapper over raw_co_prw();
 * the offset is passed by address, but raw_co_prw() only rewrites it
 * for zone append requests, never for plain writes.
 */
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
                                       int64_t bytes, QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE);
}
2580c1bb86cdSEric Blake
raw_co_flush_to_disk(BlockDriverState * bs)2581dda56b75SPaolo Bonzini static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
2582c1bb86cdSEric Blake {
2583c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
258406dc9bd5SKevin Wolf RawPosixAIOData acb;
258533d70fb6SKevin Wolf int ret;
2586c1bb86cdSEric Blake
258733d70fb6SKevin Wolf ret = fd_open(bs);
258833d70fb6SKevin Wolf if (ret < 0) {
258933d70fb6SKevin Wolf return ret;
259033d70fb6SKevin Wolf }
2591c1bb86cdSEric Blake
259206dc9bd5SKevin Wolf acb = (RawPosixAIOData) {
259306dc9bd5SKevin Wolf .bs = bs,
259406dc9bd5SKevin Wolf .aio_fildes = s->fd,
259506dc9bd5SKevin Wolf .aio_type = QEMU_AIO_FLUSH,
259606dc9bd5SKevin Wolf };
259706dc9bd5SKevin Wolf
2598c6447510SAarushi Mehta #ifdef CONFIG_LINUX_IO_URING
2599*cd0c0db0SStefan Hajnoczi if (raw_check_linux_io_uring(s)) {
2600a75e4e43SEmanuele Giuseppe Esposito return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
2601c6447510SAarushi Mehta }
2602c6447510SAarushi Mehta #endif
26030fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_flush, &acb);
2604c1bb86cdSEric Blake }
2605c1bb86cdSEric Blake
/*
 * Release the file descriptor held by @bs (and, on zoned builds, the
 * cached write-pointer array).  Safe to call when no fd is open.
 */
static void raw_close(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;

    if (s->fd < 0) {
        return;
    }

#if defined(CONFIG_BLKZONED)
    g_free(bs->wps);
#endif
    qemu_close(s->fd);
    s->fd = -1;
}
2618c1bb86cdSEric Blake
2619d0bc9e5dSMax Reitz /**
2620d0bc9e5dSMax Reitz * Truncates the given regular file @fd to @offset and, when growing, fills the
2621d0bc9e5dSMax Reitz * new space according to @prealloc.
2622d0bc9e5dSMax Reitz *
2623d0bc9e5dSMax Reitz * Returns: 0 on success, -errno on failure.
2624d0bc9e5dSMax Reitz */
262593f4e2ffSKevin Wolf static int coroutine_fn
raw_regular_truncate(BlockDriverState * bs,int fd,int64_t offset,PreallocMode prealloc,Error ** errp)262693f4e2ffSKevin Wolf raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
262793f4e2ffSKevin Wolf PreallocMode prealloc, Error **errp)
26289f63b07eSMax Reitz {
262929cb4c01SKevin Wolf RawPosixAIOData acb;
2630d0bc9e5dSMax Reitz
263129cb4c01SKevin Wolf acb = (RawPosixAIOData) {
263293f4e2ffSKevin Wolf .bs = bs,
263393f4e2ffSKevin Wolf .aio_fildes = fd,
263493f4e2ffSKevin Wolf .aio_type = QEMU_AIO_TRUNCATE,
263593f4e2ffSKevin Wolf .aio_offset = offset,
2636d57c44d0SKevin Wolf .truncate = {
263793f4e2ffSKevin Wolf .prealloc = prealloc,
263893f4e2ffSKevin Wolf .errp = errp,
2639d57c44d0SKevin Wolf },
264093f4e2ffSKevin Wolf };
2641d0bc9e5dSMax Reitz
26420fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_truncate, &acb);
26439f63b07eSMax Reitz }
26449f63b07eSMax Reitz
/*
 * Coroutine entry point for resizing @bs to @offset bytes.
 *
 * Regular files are resized (and optionally preallocated) via
 * raw_regular_truncate().  Character/block devices cannot be resized:
 * with @exact the size must already match, and growing beyond the
 * device length is always an error.  Preallocation is rejected for
 * anything but regular files.
 */
static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    struct stat st;
    int ret;

    if (fstat(s->fd, &st)) {
        ret = -errno;
        error_setg_errno(errp, -ret, "Failed to fstat() the file");
        return ret;
    }

    if (S_ISREG(st.st_mode)) {
        /* Always resizes to the exact @offset */
        return raw_regular_truncate(bs, s->fd, offset, prealloc, errp);
    }

    /* Preallocation only makes sense for regular files */
    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Preallocation mode '%s' unsupported for this "
                   "non-regular file", PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
        int64_t cur_length = raw_getlength(bs);

        if (offset != cur_length && exact) {
            error_setg(errp, "Cannot resize device files");
            return -ENOTSUP;
        } else if (offset > cur_length) {
            error_setg(errp, "Cannot grow device files");
            return -EINVAL;
        }
    } else {
        error_setg(errp, "Resizing this file is not supported");
        return -ENOTSUP;
    }

    return 0;
}
2687c1bb86cdSEric Blake
2688c1bb86cdSEric Blake #ifdef __OpenBSD__
raw_getlength(BlockDriverState * bs)268936c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs)
2690c1bb86cdSEric Blake {
2691c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
2692c1bb86cdSEric Blake int fd = s->fd;
2693c1bb86cdSEric Blake struct stat st;
2694c1bb86cdSEric Blake
2695c1bb86cdSEric Blake if (fstat(fd, &st))
2696c1bb86cdSEric Blake return -errno;
2697c1bb86cdSEric Blake if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
2698c1bb86cdSEric Blake struct disklabel dl;
2699c1bb86cdSEric Blake
2700c1bb86cdSEric Blake if (ioctl(fd, DIOCGDINFO, &dl))
2701c1bb86cdSEric Blake return -errno;
2702c1bb86cdSEric Blake return (uint64_t)dl.d_secsize *
2703c1bb86cdSEric Blake dl.d_partitions[DISKPART(st.st_rdev)].p_size;
2704c1bb86cdSEric Blake } else
2705c1bb86cdSEric Blake return st.st_size;
2706c1bb86cdSEric Blake }
2707c1bb86cdSEric Blake #elif defined(__NetBSD__)
raw_getlength(BlockDriverState * bs)270836c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs)
2709c1bb86cdSEric Blake {
2710c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
2711c1bb86cdSEric Blake int fd = s->fd;
2712c1bb86cdSEric Blake struct stat st;
2713c1bb86cdSEric Blake
2714c1bb86cdSEric Blake if (fstat(fd, &st))
2715c1bb86cdSEric Blake return -errno;
2716c1bb86cdSEric Blake if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
2717c1bb86cdSEric Blake struct dkwedge_info dkw;
2718c1bb86cdSEric Blake
2719c1bb86cdSEric Blake if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) {
2720c1bb86cdSEric Blake return dkw.dkw_size * 512;
2721c1bb86cdSEric Blake } else {
2722c1bb86cdSEric Blake struct disklabel dl;
2723c1bb86cdSEric Blake
2724c1bb86cdSEric Blake if (ioctl(fd, DIOCGDINFO, &dl))
2725c1bb86cdSEric Blake return -errno;
2726c1bb86cdSEric Blake return (uint64_t)dl.d_secsize *
2727c1bb86cdSEric Blake dl.d_partitions[DISKPART(st.st_rdev)].p_size;
2728c1bb86cdSEric Blake }
2729c1bb86cdSEric Blake } else
2730c1bb86cdSEric Blake return st.st_size;
2731c1bb86cdSEric Blake }
2732c1bb86cdSEric Blake #elif defined(__sun__)
raw_getlength(BlockDriverState * bs)273336c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs)
2734c1bb86cdSEric Blake {
2735c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
2736c1bb86cdSEric Blake struct dk_minfo minfo;
2737c1bb86cdSEric Blake int ret;
2738c1bb86cdSEric Blake int64_t size;
2739c1bb86cdSEric Blake
2740c1bb86cdSEric Blake ret = fd_open(bs);
2741c1bb86cdSEric Blake if (ret < 0) {
2742c1bb86cdSEric Blake return ret;
2743c1bb86cdSEric Blake }
2744c1bb86cdSEric Blake
2745c1bb86cdSEric Blake /*
2746c1bb86cdSEric Blake * Use the DKIOCGMEDIAINFO ioctl to read the size.
2747c1bb86cdSEric Blake */
2748c1bb86cdSEric Blake ret = ioctl(s->fd, DKIOCGMEDIAINFO, &minfo);
2749c1bb86cdSEric Blake if (ret != -1) {
2750c1bb86cdSEric Blake return minfo.dki_lbsize * minfo.dki_capacity;
2751c1bb86cdSEric Blake }
2752c1bb86cdSEric Blake
2753c1bb86cdSEric Blake /*
2754c1bb86cdSEric Blake * There are reports that lseek on some devices fails, but
2755c1bb86cdSEric Blake * irc discussion said that contingency on contingency was overkill.
2756c1bb86cdSEric Blake */
2757c1bb86cdSEric Blake size = lseek(s->fd, 0, SEEK_END);
2758c1bb86cdSEric Blake if (size < 0) {
2759c1bb86cdSEric Blake return -errno;
2760c1bb86cdSEric Blake }
2761c1bb86cdSEric Blake return size;
2762c1bb86cdSEric Blake }
2763c1bb86cdSEric Blake #elif defined(CONFIG_BSD)
raw_getlength(BlockDriverState * bs)276436c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs)
2765c1bb86cdSEric Blake {
2766c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
2767c1bb86cdSEric Blake int fd = s->fd;
2768c1bb86cdSEric Blake int64_t size;
2769c1bb86cdSEric Blake struct stat sb;
2770c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
2771c1bb86cdSEric Blake int reopened = 0;
2772c1bb86cdSEric Blake #endif
2773c1bb86cdSEric Blake int ret;
2774c1bb86cdSEric Blake
2775c1bb86cdSEric Blake ret = fd_open(bs);
2776c1bb86cdSEric Blake if (ret < 0)
2777c1bb86cdSEric Blake return ret;
2778c1bb86cdSEric Blake
2779c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
2780c1bb86cdSEric Blake again:
2781c1bb86cdSEric Blake #endif
2782c1bb86cdSEric Blake if (!fstat(fd, &sb) && (S_IFCHR & sb.st_mode)) {
2783267cd53fSPaolo Bonzini size = 0;
2784c1bb86cdSEric Blake #ifdef DIOCGMEDIASIZE
2785267cd53fSPaolo Bonzini if (ioctl(fd, DIOCGMEDIASIZE, (off_t *)&size)) {
2786c1bb86cdSEric Blake size = 0;
2787c1bb86cdSEric Blake }
2788267cd53fSPaolo Bonzini #endif
2789267cd53fSPaolo Bonzini #ifdef DIOCGPART
2790267cd53fSPaolo Bonzini if (size == 0) {
2791267cd53fSPaolo Bonzini struct partinfo pi;
2792267cd53fSPaolo Bonzini if (ioctl(fd, DIOCGPART, &pi) == 0) {
2793267cd53fSPaolo Bonzini size = pi.media_size;
2794267cd53fSPaolo Bonzini }
2795267cd53fSPaolo Bonzini }
2796c1bb86cdSEric Blake #endif
279709e20abdSJoelle van Dyne #if defined(DKIOCGETBLOCKCOUNT) && defined(DKIOCGETBLOCKSIZE)
2798267cd53fSPaolo Bonzini if (size == 0) {
2799c1bb86cdSEric Blake uint64_t sectors = 0;
2800c1bb86cdSEric Blake uint32_t sector_size = 0;
2801c1bb86cdSEric Blake
2802c1bb86cdSEric Blake if (ioctl(fd, DKIOCGETBLOCKCOUNT, §ors) == 0
2803c1bb86cdSEric Blake && ioctl(fd, DKIOCGETBLOCKSIZE, §or_size) == 0) {
2804c1bb86cdSEric Blake size = sectors * sector_size;
2805c1bb86cdSEric Blake }
2806c1bb86cdSEric Blake }
2807c1bb86cdSEric Blake #endif
2808267cd53fSPaolo Bonzini if (size == 0) {
2809267cd53fSPaolo Bonzini size = lseek(fd, 0LL, SEEK_END);
2810267cd53fSPaolo Bonzini }
2811267cd53fSPaolo Bonzini if (size < 0) {
2812267cd53fSPaolo Bonzini return -errno;
2813267cd53fSPaolo Bonzini }
2814c1bb86cdSEric Blake #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
2815c1bb86cdSEric Blake switch(s->type) {
2816c1bb86cdSEric Blake case FTYPE_CD:
2817c1bb86cdSEric Blake /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */
2818c1bb86cdSEric Blake if (size == 2048LL * (unsigned)-1)
2819c1bb86cdSEric Blake size = 0;
2820c1bb86cdSEric Blake /* XXX no disc? maybe we need to reopen... */
2821c1bb86cdSEric Blake if (size <= 0 && !reopened && cdrom_reopen(bs) >= 0) {
2822c1bb86cdSEric Blake reopened = 1;
2823c1bb86cdSEric Blake goto again;
2824c1bb86cdSEric Blake }
2825c1bb86cdSEric Blake }
2826c1bb86cdSEric Blake #endif
2827c1bb86cdSEric Blake } else {
2828c1bb86cdSEric Blake size = lseek(fd, 0, SEEK_END);
2829c1bb86cdSEric Blake if (size < 0) {
2830c1bb86cdSEric Blake return -errno;
2831c1bb86cdSEric Blake }
2832c1bb86cdSEric Blake }
2833c1bb86cdSEric Blake return size;
2834c1bb86cdSEric Blake }
2835c1bb86cdSEric Blake #else
raw_getlength(BlockDriverState * bs)283636c6c877SPaolo Bonzini static int64_t raw_getlength(BlockDriverState *bs)
2837c1bb86cdSEric Blake {
2838c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
2839c1bb86cdSEric Blake int ret;
2840c1bb86cdSEric Blake int64_t size;
2841c1bb86cdSEric Blake
2842c1bb86cdSEric Blake ret = fd_open(bs);
2843c1bb86cdSEric Blake if (ret < 0) {
2844c1bb86cdSEric Blake return ret;
2845c1bb86cdSEric Blake }
2846c1bb86cdSEric Blake
2847c1bb86cdSEric Blake size = lseek(s->fd, 0, SEEK_END);
2848c1bb86cdSEric Blake if (size < 0) {
2849c1bb86cdSEric Blake return -errno;
2850c1bb86cdSEric Blake }
2851c1bb86cdSEric Blake return size;
2852c1bb86cdSEric Blake }
2853c1bb86cdSEric Blake #endif
2854c1bb86cdSEric Blake
raw_co_getlength(BlockDriverState * bs)285536c6c877SPaolo Bonzini static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
285636c6c877SPaolo Bonzini {
285736c6c877SPaolo Bonzini return raw_getlength(bs);
285836c6c877SPaolo Bonzini }
285936c6c877SPaolo Bonzini
raw_co_get_allocated_file_size(BlockDriverState * bs)286082618d7bSEmanuele Giuseppe Esposito static int64_t coroutine_fn raw_co_get_allocated_file_size(BlockDriverState *bs)
2861c1bb86cdSEric Blake {
2862c1bb86cdSEric Blake struct stat st;
2863c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
2864c1bb86cdSEric Blake
2865c1bb86cdSEric Blake if (fstat(s->fd, &st) < 0) {
2866c1bb86cdSEric Blake return -errno;
2867c1bb86cdSEric Blake }
2868c1bb86cdSEric Blake return (int64_t)st.st_blocks * 512;
2869c1bb86cdSEric Blake }
2870c1bb86cdSEric Blake
/*
 * Create a new image file according to @options (blockdev-create path).
 *
 * The file is created, locked against concurrent resizing, truncated to
 * zero, optionally marked NOCOW / given an extent size hint, and finally
 * resized (and possibly preallocated) to the requested size.
 *
 * Returns 0 on success, a negative errno on failure (with @errp set).
 */
static int coroutine_fn
raw_co_create(BlockdevCreateOptions *options, Error **errp)
{
    BlockdevCreateOptionsFile *file_opts;
    Error *local_err = NULL;
    int fd;
    uint64_t perm, shared;
    int result = 0;

    /* Validate options and set default values */
    assert(options->driver == BLOCKDEV_DRIVER_FILE);
    file_opts = &options->u.file;

    if (!file_opts->has_nocow) {
        file_opts->nocow = false;
    }
    if (!file_opts->has_preallocation) {
        file_opts->preallocation = PREALLOC_MODE_OFF;
    }
    if (!file_opts->has_extent_size_hint) {
        file_opts->extent_size_hint = 1 * MiB;
    }
    /* fsx_extsize below is 32 bits wide, so larger hints cannot be stored */
    if (file_opts->extent_size_hint > UINT32_MAX) {
        result = -EINVAL;
        error_setg(errp, "Extent size hint is too large");
        goto out;
    }

    /* Create file; qemu_create() sets @errp itself on failure */
    fd = qemu_create(file_opts->filename, O_RDWR | O_BINARY, 0644, errp);
    if (fd < 0) {
        result = -errno;
        goto out;
    }

    /* Take permissions: We want to discard everything, so we need
     * BLK_PERM_WRITE; and truncation to the desired size requires
     * BLK_PERM_RESIZE.
     * On the other hand, we cannot share the RESIZE permission
     * because we promise that after this function, the file has the
     * size given in the options. If someone else were to resize it
     * concurrently, we could not guarantee that.
     * Note that after this function, we can no longer guarantee that
     * the file is not touched by a third party, so it may be resized
     * then. */
    perm = BLK_PERM_WRITE | BLK_PERM_RESIZE;
    shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;

    /* Step one: Take locks */
    result = raw_apply_lock_bytes(NULL, fd, perm, ~shared, false, errp);
    if (result < 0) {
        goto out_close;
    }

    /* Step two: Check that nobody else has taken conflicting locks */
    result = raw_check_lock_bytes(fd, perm, shared, errp);
    if (result < 0) {
        error_append_hint(errp,
                          "Is another process using the image [%s]?\n",
                          file_opts->filename);
        goto out_unlock;
    }

    /* Clear the file by truncating it to 0 */
    result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp);
    if (result < 0) {
        goto out_unlock;
    }

    if (file_opts->nocow) {
#ifdef __linux__
        /* Set NOCOW flag to solve performance issue on fs like btrfs.
         * This is an optimisation. The FS_IOC_SETFLAGS ioctl return value
         * will be ignored since any failure of this operation should not
         * block the left work.
         */
        int attr;
        if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) {
            attr |= FS_NOCOW_FL;
            ioctl(fd, FS_IOC_SETFLAGS, &attr);
        }
#endif
    }
#ifdef FS_IOC_FSSETXATTR
    /*
     * Try to set the extent size hint. Failure is not fatal, and a warning is
     * only printed if the option was explicitly specified.
     */
    {
        struct fsxattr attr;
        result = ioctl(fd, FS_IOC_FSGETXATTR, &attr);
        if (result == 0) {
            attr.fsx_xflags |= FS_XFLAG_EXTSIZE;
            attr.fsx_extsize = file_opts->extent_size_hint;
            result = ioctl(fd, FS_IOC_FSSETXATTR, &attr);
        }
        if (result < 0 && file_opts->has_extent_size_hint &&
            file_opts->extent_size_hint)
        {
            warn_report("Failed to set extent size hint: %s",
                        strerror(errno));
        }
    }
#endif

    /* Resize and potentially preallocate the file to the desired
     * final size */
    result = raw_regular_truncate(NULL, fd, file_opts->size,
                                  file_opts->preallocation, errp);
    if (result < 0) {
        goto out_unlock;
    }

out_unlock:
    raw_apply_lock_bytes(NULL, fd, 0, 0, true, &local_err);
    if (local_err) {
        /* The above call should not fail, and if it does, that does
         * not mean the whole creation operation has failed. So
         * report it the user for their convenience, but do not report
         * it to the caller. */
        warn_report_err(local_err);
    }

out_close:
    /* Keep the first error; only report a close failure if all else worked */
    if (qemu_close(fd) != 0 && result == 0) {
        result = -errno;
        error_setg_errno(errp, -result, "Could not close the new file");
    }
out:
    return result;
}
3002c1bb86cdSEric Blake
/*
 * Legacy image-creation entry point (qemu-img create -f file/raw):
 * translate the QemuOpts dictionary into a BlockdevCreateOptions
 * structure and delegate to raw_co_create().
 *
 * Returns 0 on success, negative errno on failure (with @errp set).
 */
static int coroutine_fn GRAPH_RDLOCK
raw_co_create_opts(BlockDriver *drv, const char *filename,
                   QemuOpts *opts, Error **errp)
{
    BlockdevCreateOptions options;
    int64_t total_size = 0;
    int64_t extent_size_hint = 0;
    bool has_extent_size_hint = false;
    bool nocow = false;
    PreallocMode prealloc;
    char *buf = NULL;
    Error *local_err = NULL;

    /* Skip file: protocol prefix */
    strstart(filename, "file:", &filename);

    /* Read out options */
    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    /* Probe presence first (non-destructive get), then consume the value */
    if (qemu_opt_get(opts, BLOCK_OPT_EXTENT_SIZE_HINT)) {
        has_extent_size_hint = true;
        extent_size_hint =
            qemu_opt_get_size_del(opts, BLOCK_OPT_EXTENT_SIZE_HINT, -1);
    }
    nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false);
    buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    prealloc = qapi_enum_parse(&PreallocMode_lookup, buf,
                               PREALLOC_MODE_OFF, &local_err);
    g_free(buf);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }

    options = (BlockdevCreateOptions) {
        .driver     = BLOCKDEV_DRIVER_FILE,
        .u.file     = {
            .filename           = (char *) filename,
            .size               = total_size,
            .has_preallocation  = true,
            .preallocation      = prealloc,
            .has_nocow          = true,
            .nocow              = nocow,
            .has_extent_size_hint = has_extent_size_hint,
            .extent_size_hint   = extent_size_hint,
        },
    };
    return raw_co_create(&options, errp);
}
3052927f11e1SKevin Wolf
raw_co_delete_file(BlockDriverState * bs,Error ** errp)30539bffae14SDaniel Henrique Barboza static int coroutine_fn raw_co_delete_file(BlockDriverState *bs,
30549bffae14SDaniel Henrique Barboza Error **errp)
30559bffae14SDaniel Henrique Barboza {
30569bffae14SDaniel Henrique Barboza struct stat st;
30579bffae14SDaniel Henrique Barboza int ret;
30589bffae14SDaniel Henrique Barboza
30599bffae14SDaniel Henrique Barboza if (!(stat(bs->filename, &st) == 0) || !S_ISREG(st.st_mode)) {
30609bffae14SDaniel Henrique Barboza error_setg_errno(errp, ENOENT, "%s is not a regular file",
30619bffae14SDaniel Henrique Barboza bs->filename);
30629bffae14SDaniel Henrique Barboza return -ENOENT;
30639bffae14SDaniel Henrique Barboza }
30649bffae14SDaniel Henrique Barboza
30659bffae14SDaniel Henrique Barboza ret = unlink(bs->filename);
30669bffae14SDaniel Henrique Barboza if (ret < 0) {
30679bffae14SDaniel Henrique Barboza ret = -errno;
30689bffae14SDaniel Henrique Barboza error_setg_errno(errp, -ret, "Error when deleting file %s",
30699bffae14SDaniel Henrique Barboza bs->filename);
30709bffae14SDaniel Henrique Barboza }
30719bffae14SDaniel Henrique Barboza
30729bffae14SDaniel Henrique Barboza return ret;
30739bffae14SDaniel Henrique Barboza }
30749bffae14SDaniel Henrique Barboza
3075c1bb86cdSEric Blake /*
3076c1bb86cdSEric Blake * Find allocation range in @bs around offset @start.
3077c1bb86cdSEric Blake * May change underlying file descriptor's file offset.
3078c1bb86cdSEric Blake * If @start is not in a hole, store @start in @data, and the
3079c1bb86cdSEric Blake * beginning of the next hole in @hole, and return 0.
3080c1bb86cdSEric Blake * If @start is in a non-trailing hole, store @start in @hole and the
3081c1bb86cdSEric Blake * beginning of the next non-hole in @data, and return 0.
3082c1bb86cdSEric Blake * If @start is in a trailing hole or beyond EOF, return -ENXIO.
3083c1bb86cdSEric Blake * If we can't find out, return a negative errno other than -ENXIO.
3084c1bb86cdSEric Blake */
static int find_allocation(BlockDriverState *bs, off_t start,
                           off_t *data, off_t *hole)
{
#if defined SEEK_HOLE && defined SEEK_DATA
    BDRVRawState *s = bs->opaque;
    off_t pos;

    /*
     * Probe with SEEK_DATA first:
     *   pos == start: @start lies in data
     *   pos >  start: @start lies in a hole, data resumes at pos
     *   pos <  0, errno == ENXIO: trailing hole, or @start is beyond
     *       EOF (the file was truncated behind our back since we opened
     *       it -- all bets are off, and treating it like a trailing
     *       hole is simplest)
     *   pos <  0, other errno: we learned nothing
     */
    pos = lseek(s->fd, start, SEEK_DATA);
    if (pos < 0) {
        return -errno;
    }
    if (pos < start) {
        /* lseek() must not go backwards; treat the bogus result as if
         * we had learned nothing and report -EIO */
        return -EIO;
    }
    if (pos > start) {
        /* In a hole; the next data begins at pos */
        *hole = start;
        *data = pos;
        return 0;
    }

    /*
     * @start is in data; find the end of the extent with SEEK_HOLE:
     *   pos >  start: in data until pos (a hole there), or @start moved
     *       into a trailing hole with EOF at pos -- Solaris seeks to
     *       EOF for trailing holes instead of returning start
     *   pos == start: a hole was dug here behind our back since the
     *       SEEK_DATA call above
     *   pos <  0, errno == ENXIO: the file was truncated behind our
     *       back; treat like a trailing hole
     *   pos <  0, other errno: we learned nothing -- "forget" the
     *       SEEK_DATA result too
     */
    pos = lseek(s->fd, start, SEEK_HOLE);
    if (pos < 0) {
        return -errno;
    }
    if (pos < start) {
        /* Again an invalid lseek() result; report -EIO */
        return -EIO;
    }
    if (pos > start) {
        /*
         * Either in data with the next hole at pos, or the region
         * turned into a trailing hole meanwhile.  In the latter case
         * all bets are off; pretending there is data all the way to
         * pos is safe either way.
         */
        *data = start;
        *hole = pos;
        return 0;
    }

    /* SEEK_DATA said data but SEEK_HOLE says hole: raced with a
     * concurrent modification */
    return -EBUSY;
#else
    return -ENOTSUP;
#endif
}
3169c1bb86cdSEric Blake
3170c1bb86cdSEric Blake /*
3171a290f085SEric Blake * Returns the allocation status of the specified offset.
3172c1bb86cdSEric Blake *
3173a290f085SEric Blake * The block layer guarantees 'offset' and 'bytes' are within bounds.
3174c1bb86cdSEric Blake *
3175a290f085SEric Blake * 'pnum' is set to the number of bytes (including and immediately following
3176a290f085SEric Blake * the specified offset) that are known to be in the same
3177c1bb86cdSEric Blake * allocated/unallocated state.
3178c1bb86cdSEric Blake *
3179869e7ee8SHanna Reitz * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
3180869e7ee8SHanna Reitz * well exceed it.
3181c1bb86cdSEric Blake */
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
                                            bool want_zero,
                                            int64_t offset,
                                            int64_t bytes, int64_t *pnum,
                                            int64_t *map,
                                            BlockDriverState **file)
{
    off_t data = 0, hole = 0;
    int ret;

    /* The block layer hands us aligned requests only */
    assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    if (!want_zero) {
        /* Caller does not care about zero detection: report the whole
         * range as data without touching the file system */
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    ret = find_allocation(bs, offset, &data, &hole);
    if (ret == -ENXIO) {
        /* Trailing hole */
        *pnum = bytes;
        ret = BDRV_BLOCK_ZERO;
    } else if (ret < 0) {
        /* No info available, so pretend there are no holes */
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA;
    } else if (data == offset) {
        /* On a data extent, compute bytes to the end of the extent,
         * possibly including a partial sector at EOF. */
        *pnum = hole - offset;

        /*
         * We are not allowed to return partial sectors, though, so
         * round up if necessary.
         */
        if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
            int64_t file_length = raw_getlength(bs);
            if (file_length > 0) {
                /* Ignore errors, this is just a safeguard */
                assert(hole == file_length);
            }
            *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
        }

        ret = BDRV_BLOCK_DATA;
    } else {
        /* On a hole, compute bytes to the beginning of the next extent.  */
        assert(hole == offset);
        *pnum = data - offset;
        ret = BDRV_BLOCK_ZERO;
    }
    *map = offset;
    *file = bs;
    return ret | BDRV_BLOCK_OFFSET_VALID;
}
3244c1bb86cdSEric Blake
324531be8a2aSStefan Hajnoczi #if defined(__linux__)
324631be8a2aSStefan Hajnoczi /* Verify that the file is not in the page cache */
/* Verify that the file is not in the page cache */
static void check_cache_dropped(BlockDriverState *bs, Error **errp)
{
    const size_t window_size = 128 * 1024 * 1024;
    BDRVRawState *s = bs->opaque;
    void *window = NULL;
    size_t length = 0;
    unsigned char *vec;
    size_t page_size;
    off_t offset;
    off_t end;

    /* mincore(2) page status information requires 1 byte per page */
    page_size = sysconf(_SC_PAGESIZE);
    vec = g_malloc(DIV_ROUND_UP(window_size, page_size));

    end = raw_getlength(bs);

    /* Scan the file in window_size chunks, mapping each and asking the
     * kernel which of its pages are resident */
    for (offset = 0; offset < end; offset += window_size) {
        void *new_window;
        size_t new_length;
        size_t vec_end;
        size_t i;
        int ret;

        /* Unmap previous window if size has changed */
        new_length = MIN(end - offset, window_size);
        if (new_length != length) {
            munmap(window, length);
            window = NULL;
            length = 0;
        }

        /* PROT_NONE: we never touch the pages, we only query residency;
         * passing the previous window as a hint lets the kernel reuse
         * the same address range */
        new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE,
                          s->fd, offset);
        if (new_window == MAP_FAILED) {
            error_setg_errno(errp, errno, "mmap failed");
            break;
        }

        window = new_window;
        length = new_length;

        ret = mincore(window, length, vec);
        if (ret < 0) {
            error_setg_errno(errp, errno, "mincore failed");
            break;
        }

        /* Bit 0 of each vec entry is "page resident in core" */
        vec_end = DIV_ROUND_UP(length, page_size);
        for (i = 0; i < vec_end; i++) {
            if (vec[i] & 0x1) {
                break;
            }
        }
        if (i < vec_end) {
            error_setg(errp, "page cache still in use!");
            break;
        }
    }

    if (window) {
        munmap(window, length);
    }

    g_free(vec);
}
331331be8a2aSStefan Hajnoczi #endif /* __linux__ */
331431be8a2aSStefan Hajnoczi
/*
 * Drop the host page cache for this file, if configured and possible.
 *
 * Called as the driver's .bdrv_co_invalidate_cache callback (see bdrv_file
 * below).  On Linux this flushes the image and then asks the kernel to drop
 * cached pages via posix_fadvise(DONTNEED); on other hosts it does nothing.
 * Errors are reported through @errp.
 */
static void coroutine_fn GRAPH_RDLOCK
raw_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "The file descriptor is not open");
        return;
    }

    /* Cache dropping is opt-in (see s->drop_cache). */
    if (!s->drop_cache) {
        return;
    }

    if (s->open_flags & O_DIRECT) {
        return; /* No host kernel page cache */
    }

#if defined(__linux__)
    /* This sets the scene for the next syscall... */
    ret = bdrv_co_flush(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "flush failed");
        return;
    }

    /* Linux does not invalidate pages that are dirty, locked, or mmapped by a
     * process. These limitations are okay because we just fsynced the file,
     * we don't use mmap, and the file should not be in use by other processes.
     */
    ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED);
    if (ret != 0) { /* the return value is a positive errno */
        error_setg_errno(errp, ret, "fadvise failed");
        return;
    }

    /* Optionally verify (via mincore) that the pages are really gone. */
    if (s->check_cache_dropped) {
        check_cache_dropped(bs, errp);
    }
#else /* __linux__ */
    /* Do nothing. Live migration to a remote host with cache.direct=off is
     * unsupported on other host operating systems. Cache consistency issues
     * may occur but no error is reported here, partly because that's the
     * historical behavior and partly because it's hard to differentiate valid
     * configurations that should not cause errors.
     */
#endif /* !__linux__ */
}
3365dd577a26SStefan Hajnoczi
raw_account_discard(BDRVRawState * s,uint64_t nbytes,int ret)33661c450366SAnton Nefedov static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
33671c450366SAnton Nefedov {
33681c450366SAnton Nefedov if (ret) {
33691c450366SAnton Nefedov s->stats.discard_nb_failed++;
33701c450366SAnton Nefedov } else {
33711c450366SAnton Nefedov s->stats.discard_nb_ok++;
33721c450366SAnton Nefedov s->stats.discard_bytes_ok += nbytes;
33731c450366SAnton Nefedov }
33741c450366SAnton Nefedov }
33751c450366SAnton Nefedov
33766d43eaa3SSam Li /*
33776d43eaa3SSam Li * zone report - Get a zone block device's information in the form
33786d43eaa3SSam Li * of an array of zone descriptors.
33796d43eaa3SSam Li * zones is an array of zone descriptors to hold zone information on reply;
33806d43eaa3SSam Li * offset can be any byte within the entire size of the device;
 * nr_zones is the maximum number of zone descriptors the command should
 * operate on (in, the size of the zones array; out, the number filled in).
33826d43eaa3SSam Li */
33836d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
raw_co_zone_report(BlockDriverState * bs,int64_t offset,unsigned int * nr_zones,BlockZoneDescriptor * zones)33846d43eaa3SSam Li static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset,
33856d43eaa3SSam Li unsigned int *nr_zones,
33866d43eaa3SSam Li BlockZoneDescriptor *zones) {
33876d43eaa3SSam Li BDRVRawState *s = bs->opaque;
33886d43eaa3SSam Li RawPosixAIOData acb = (RawPosixAIOData) {
33896d43eaa3SSam Li .bs = bs,
33906d43eaa3SSam Li .aio_fildes = s->fd,
33916d43eaa3SSam Li .aio_type = QEMU_AIO_ZONE_REPORT,
33926d43eaa3SSam Li .aio_offset = offset,
33936d43eaa3SSam Li .zone_report = {
33946d43eaa3SSam Li .nr_zones = nr_zones,
33956d43eaa3SSam Li .zones = zones,
33966d43eaa3SSam Li },
33976d43eaa3SSam Li };
33986d43eaa3SSam Li
3399142e307eSSam Li trace_zbd_zone_report(bs, *nr_zones, offset >> BDRV_SECTOR_BITS);
34006d43eaa3SSam Li return raw_thread_pool_submit(handle_aiocb_zone_report, &acb);
34016d43eaa3SSam Li }
34026d43eaa3SSam Li #endif
34036d43eaa3SSam Li
34046d43eaa3SSam Li /*
34056d43eaa3SSam Li * zone management operations - Execute an operation on a zone
34066d43eaa3SSam Li */
34076d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
/*
 * Implementation of .bdrv_co_zone_mgmt: run one zone management operation
 * (open/close/finish/reset) on the byte range [@offset, @offset + @len).
 * Validates zone alignment, issues the corresponding BLK*ZONE ioctl via the
 * thread pool, and keeps the cached write pointers in bs->wps consistent.
 * Returns 0 on success or a negative errno.
 */
static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
                                         int64_t offset, int64_t len) {
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int64_t zone_size, zone_size_mask;
    const char *op_name;
    unsigned long zo;
    int ret;
    BlockZoneWps *wps = bs->wps;
    int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;

    /* The start of the range must sit on a zone boundary. */
    zone_size = bs->bl.zone_size;
    zone_size_mask = zone_size - 1;
    if (offset & zone_size_mask) {
        error_report("sector offset %" PRId64 " is not aligned to zone size "
                     "%" PRId64 "", offset / 512, zone_size / 512);
        return -EINVAL;
    }

    /* The length must be zone-aligned too, unless it reaches exactly the
     * end of the device (the last zone may be shorter), and must not run
     * past the device capacity. */
    if (((offset + len) < capacity && len & zone_size_mask) ||
        offset + len > capacity) {
        error_report("number of sectors %" PRId64 " is not aligned to zone size"
                     " %" PRId64 "", len / 512, zone_size / 512);
        return -EINVAL;
    }

    /* Index of the first affected zone and number of zones covered. */
    uint32_t i = offset / bs->bl.zone_size;
    uint32_t nrz = len / bs->bl.zone_size;
    uint64_t *wp = &wps->wp[i];
    /* Conventional zones only accept whole-device operations. */
    if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
        error_report("zone mgmt operations are not allowed for conventional zones");
        return -EIO;
    }

    /* Map the generic zone op onto the Linux blkzoned ioctl request. */
    switch (op) {
    case BLK_ZO_OPEN:
        op_name = "BLKOPENZONE";
        zo = BLKOPENZONE;
        break;
    case BLK_ZO_CLOSE:
        op_name = "BLKCLOSEZONE";
        zo = BLKCLOSEZONE;
        break;
    case BLK_ZO_FINISH:
        op_name = "BLKFINISHZONE";
        zo = BLKFINISHZONE;
        break;
    case BLK_ZO_RESET:
        op_name = "BLKRESETZONE";
        zo = BLKRESETZONE;
        break;
    default:
        error_report("Unsupported zone op: 0x%x", op);
        return -ENOTSUP;
    }

    acb = (RawPosixAIOData) {
        .bs = bs,
        .aio_fildes = s->fd,
        .aio_type = QEMU_AIO_ZONE_MGMT,
        .aio_offset = offset,
        .aio_nbytes = len,
        .zone_mgmt = {
            .op = zo,
        },
    };

    trace_zbd_zone_mgmt(bs, op_name, offset >> BDRV_SECTOR_BITS,
                        len >> BDRV_SECTOR_BITS);
    ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb);
    if (ret != 0) {
        /* On failure the device state is unknown: re-read the affected
         * write pointers from the device before reporting the error. */
        update_zones_wp(bs, s->fd, offset, nrz);
        error_report("ioctl %s failed %d", op_name, ret);
        return ret;
    }

    if (zo == BLKRESETZONE && len == capacity) {
        /* Whole-device reset: refresh every cached write pointer at once. */
        ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 1);
        if (ret < 0) {
            error_report("reporting single wp failed");
            return ret;
        }
    } else if (zo == BLKRESETZONE) {
        /* Reset zones start writing again at their own start offset. */
        for (unsigned int j = 0; j < nrz; ++j) {
            wp[j] = offset + j * zone_size;
        }
    } else if (zo == BLKFINISHZONE) {
        for (unsigned int j = 0; j < nrz; ++j) {
            /* The zoned device allows the last zone smaller than the
             * zone size. */
            wp[j] = MIN(offset + (j + 1) * zone_size, offset + len);
        }
    }

    return ret;
}
35046d43eaa3SSam Li #endif
35056d43eaa3SSam Li
35064751d09aSSam Li #if defined(CONFIG_BLKZONED)
raw_co_zone_append(BlockDriverState * bs,int64_t * offset,QEMUIOVector * qiov,BdrvRequestFlags flags)35074751d09aSSam Li static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
35084751d09aSSam Li int64_t *offset,
35094751d09aSSam Li QEMUIOVector *qiov,
35104751d09aSSam Li BdrvRequestFlags flags) {
35114751d09aSSam Li assert(flags == 0);
35124751d09aSSam Li int64_t zone_size_mask = bs->bl.zone_size - 1;
35134751d09aSSam Li int64_t iov_len = 0;
35144751d09aSSam Li int64_t len = 0;
35154751d09aSSam Li
35164751d09aSSam Li if (*offset & zone_size_mask) {
35174751d09aSSam Li error_report("sector offset %" PRId64 " is not aligned to zone size "
35184751d09aSSam Li "%" PRId32 "", *offset / 512, bs->bl.zone_size / 512);
35194751d09aSSam Li return -EINVAL;
35204751d09aSSam Li }
35214751d09aSSam Li
35224751d09aSSam Li int64_t wg = bs->bl.write_granularity;
35234751d09aSSam Li int64_t wg_mask = wg - 1;
35244751d09aSSam Li for (int i = 0; i < qiov->niov; i++) {
35254751d09aSSam Li iov_len = qiov->iov[i].iov_len;
35264751d09aSSam Li if (iov_len & wg_mask) {
35274751d09aSSam Li error_report("len of IOVector[%d] %" PRId64 " is not aligned to "
35284751d09aSSam Li "block size %" PRId64 "", i, iov_len, wg);
35294751d09aSSam Li return -EINVAL;
35304751d09aSSam Li }
35314751d09aSSam Li len += iov_len;
35324751d09aSSam Li }
35334751d09aSSam Li
35346c811e19SSam Li trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS);
3535ad4feacaSNaohiro Aota return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND);
35364751d09aSSam Li }
35374751d09aSSam Li #endif
35384751d09aSSam Li
353933d70fb6SKevin Wolf static coroutine_fn int
raw_do_pdiscard(BlockDriverState * bs,int64_t offset,int64_t bytes,bool blkdev)35400c802287SVladimir Sementsov-Ogievskiy raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
35410c802287SVladimir Sementsov-Ogievskiy bool blkdev)
3542c1bb86cdSEric Blake {
3543c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
354446ee0f46SKevin Wolf RawPosixAIOData acb;
35451c450366SAnton Nefedov int ret;
3546c1bb86cdSEric Blake
354746ee0f46SKevin Wolf acb = (RawPosixAIOData) {
354846ee0f46SKevin Wolf .bs = bs,
354946ee0f46SKevin Wolf .aio_fildes = s->fd,
355046ee0f46SKevin Wolf .aio_type = QEMU_AIO_DISCARD,
355146ee0f46SKevin Wolf .aio_offset = offset,
355246ee0f46SKevin Wolf .aio_nbytes = bytes,
355346ee0f46SKevin Wolf };
355446ee0f46SKevin Wolf
355546ee0f46SKevin Wolf if (blkdev) {
355646ee0f46SKevin Wolf acb.aio_type |= QEMU_AIO_BLKDEV;
355746ee0f46SKevin Wolf }
355846ee0f46SKevin Wolf
35590fdb7311SEmanuele Giuseppe Esposito ret = raw_thread_pool_submit(handle_aiocb_discard, &acb);
35601c450366SAnton Nefedov raw_account_discard(s, bytes, ret);
35611c450366SAnton Nefedov return ret;
356246ee0f46SKevin Wolf }
356346ee0f46SKevin Wolf
/*
 * .bdrv_co_pdiscard for the "file" protocol driver: same as
 * raw_do_pdiscard() with blkdev=false (no QEMU_AIO_BLKDEV flag).
 */
static coroutine_fn int
raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    return raw_do_pdiscard(bs, offset, bytes, false);
}
3569c1bb86cdSEric Blake
/*
 * Common write-zeroes path for files and host block devices: zero @bytes
 * bytes at @offset via the thread pool.  @blkdev selects the block-device
 * flavor; @flags may request BDRV_REQ_MAY_UNMAP (allow deallocating) and
 * BDRV_REQ_NO_FALLBACK (fail instead of writing explicit zeroes).
 */
static int coroutine_fn
raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     BdrvRequestFlags flags, bool blkdev)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    ThreadPoolFunc *handler;

#ifdef CONFIG_FALLOCATE
    if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
        BdrvTrackedRequest *req;

        /*
         * This is a workaround for a bug in the Linux XFS driver,
         * where writes submitted through the AIO interface will be
         * discarded if they happen beyond a concurrently running
         * fallocate() that increases the file length (i.e., both the
         * write and the fallocate() happen beyond the EOF).
         *
         * To work around it, we extend the tracked request for this
         * zero write until INT64_MAX (effectively infinity), and mark
         * it as serializing.
         *
         * We have to enable this workaround for all filesystems and
         * AIO modes (not just XFS with aio=native), because for
         * remote filesystems we do not know the host configuration.
         */

        req = bdrv_co_get_self_request(bs);
        assert(req);
        assert(req->type == BDRV_TRACKED_WRITE);
        assert(req->offset <= offset);
        assert(req->offset + req->bytes >= offset + bytes);

        /* Stretch the tracked request to the maximum valid length. */
        req->bytes = BDRV_MAX_LENGTH - req->offset;

        bdrv_check_request(req->offset, req->bytes, &error_abort);

        bdrv_make_request_serialising(req, bs->bl.request_alignment);
    }
#endif

    acb = (RawPosixAIOData) {
        .bs = bs,
        .aio_fildes = s->fd,
        .aio_type = QEMU_AIO_WRITE_ZEROES,
        .aio_offset = offset,
        .aio_nbytes = bytes,
    };

    if (blkdev) {
        acb.aio_type |= QEMU_AIO_BLKDEV;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        acb.aio_type |= QEMU_AIO_NO_FALLBACK;
    }

    /* MAY_UNMAP allows the handler to deallocate instead of writing zeroes. */
    if (flags & BDRV_REQ_MAY_UNMAP) {
        acb.aio_type |= QEMU_AIO_DISCARD;
        handler = handle_aiocb_write_zeroes_unmap;
    } else {
        handler = handle_aiocb_write_zeroes;
    }

    return raw_thread_pool_submit(handler, &acb);
}
36367154d8aeSKevin Wolf
/*
 * .bdrv_co_pwrite_zeroes for the "file" protocol driver: same as
 * raw_do_pwrite_zeroes() with blkdev=false.
 */
static int coroutine_fn raw_co_pwrite_zeroes(
    BlockDriverState *bs, int64_t offset,
    int64_t bytes, BdrvRequestFlags flags)
{
    return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false);
}
3643c1bb86cdSEric Blake
/*
 * .bdrv_co_get_info: nothing to fill in for raw files; succeed with
 * *bdi left unmodified.
 */
static int coroutine_fn
raw_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    return 0;
}
3649c1bb86cdSEric Blake
raw_get_specific_info(BlockDriverState * bs,Error ** errp)36507f36a50aSHanna Reitz static ImageInfoSpecific *raw_get_specific_info(BlockDriverState *bs,
36517f36a50aSHanna Reitz Error **errp)
36527f36a50aSHanna Reitz {
36537f36a50aSHanna Reitz ImageInfoSpecificFile *file_info = g_new0(ImageInfoSpecificFile, 1);
36547f36a50aSHanna Reitz ImageInfoSpecific *spec_info = g_new(ImageInfoSpecific, 1);
36557f36a50aSHanna Reitz
36567f36a50aSHanna Reitz *spec_info = (ImageInfoSpecific){
36577f36a50aSHanna Reitz .type = IMAGE_INFO_SPECIFIC_KIND_FILE,
36587f36a50aSHanna Reitz .u.file.data = file_info,
36597f36a50aSHanna Reitz };
36607f36a50aSHanna Reitz
36617f36a50aSHanna Reitz #ifdef FS_IOC_FSGETXATTR
36627f36a50aSHanna Reitz {
36637f36a50aSHanna Reitz BDRVRawState *s = bs->opaque;
36647f36a50aSHanna Reitz struct fsxattr attr;
36657f36a50aSHanna Reitz int ret;
36667f36a50aSHanna Reitz
36677f36a50aSHanna Reitz ret = ioctl(s->fd, FS_IOC_FSGETXATTR, &attr);
36687f36a50aSHanna Reitz if (!ret && attr.fsx_extsize != 0) {
36697f36a50aSHanna Reitz file_info->has_extent_size_hint = true;
36707f36a50aSHanna Reitz file_info->extent_size_hint = attr.fsx_extsize;
36717f36a50aSHanna Reitz }
36727f36a50aSHanna Reitz }
36737f36a50aSHanna Reitz #endif
36747f36a50aSHanna Reitz
36757f36a50aSHanna Reitz return spec_info;
36767f36a50aSHanna Reitz }
36777f36a50aSHanna Reitz
get_blockstats_specific_file(BlockDriverState * bs)3678d9245599SAnton Nefedov static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs)
3679d9245599SAnton Nefedov {
3680d9245599SAnton Nefedov BDRVRawState *s = bs->opaque;
3681d9245599SAnton Nefedov return (BlockStatsSpecificFile) {
3682d9245599SAnton Nefedov .discard_nb_ok = s->stats.discard_nb_ok,
3683d9245599SAnton Nefedov .discard_nb_failed = s->stats.discard_nb_failed,
3684d9245599SAnton Nefedov .discard_bytes_ok = s->stats.discard_bytes_ok,
3685d9245599SAnton Nefedov };
3686d9245599SAnton Nefedov }
3687d9245599SAnton Nefedov
raw_get_specific_stats(BlockDriverState * bs)3688d9245599SAnton Nefedov static BlockStatsSpecific *raw_get_specific_stats(BlockDriverState *bs)
3689d9245599SAnton Nefedov {
3690d9245599SAnton Nefedov BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
3691d9245599SAnton Nefedov
3692d9245599SAnton Nefedov stats->driver = BLOCKDEV_DRIVER_FILE;
3693d9245599SAnton Nefedov stats->u.file = get_blockstats_specific_file(bs);
3694d9245599SAnton Nefedov
3695d9245599SAnton Nefedov return stats;
3696d9245599SAnton Nefedov }
3697d9245599SAnton Nefedov
369814176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE)
hdev_get_specific_stats(BlockDriverState * bs)3699d9245599SAnton Nefedov static BlockStatsSpecific *hdev_get_specific_stats(BlockDriverState *bs)
3700d9245599SAnton Nefedov {
3701d9245599SAnton Nefedov BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
3702d9245599SAnton Nefedov
3703d9245599SAnton Nefedov stats->driver = BLOCKDEV_DRIVER_HOST_DEVICE;
3704d9245599SAnton Nefedov stats->u.host_device = get_blockstats_specific_file(bs);
3705d9245599SAnton Nefedov
3706d9245599SAnton Nefedov return stats;
3707d9245599SAnton Nefedov }
370814176c8dSJoelle van Dyne #endif /* HAVE_HOST_BLOCK_DEVICE */
3709d9245599SAnton Nefedov
/*
 * Creation options for raw files; referenced from bdrv_file.create_opts.
 * The preallocation choices depend on host support for posix_fallocate().
 */
static QemuOptsList raw_create_opts = {
    .name = "raw-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_NOCOW,
            .type = QEMU_OPT_BOOL,
            .help = "Turn off copy-on-write (valid only on btrfs)"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off"
#ifdef CONFIG_POSIX_FALLOCATE
                    ", falloc"
#endif
                    ", full)"
        },
        {
            .name = BLOCK_OPT_EXTENT_SIZE_HINT,
            .type = QEMU_OPT_SIZE,
            .help = "Extent size hint for the image file, 0 to disable"
        },
        { /* end of list */ }
    }
};
3741c1bb86cdSEric Blake
/*
 * .bdrv_check_perm: first phase of a permission change.  May open a
 * replacement fd (stored in s->perm_change_fd) if auto-read-only needs a
 * different open mode, prepares the file locks on the old fd, and copies
 * the locks to the new fd.  The change is finalized by raw_set_perm() or
 * rolled back by raw_abort_perm_update().  Returns 0 or a negative errno.
 */
static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared,
                          Error **errp)
{
    BDRVRawState *s = bs->opaque;
    /* During reopen, check against the flags being committed, not the old ones. */
    int input_flags = s->reopen_state ? s->reopen_state->flags : bs->open_flags;
    int open_flags;
    int ret;

    /* We may need a new fd if auto-read-only switches the mode */
    ret = raw_reconfigure_getfd(bs, input_flags, &open_flags, perm,
                                false, errp);
    if (ret < 0) {
        return ret;
    } else if (ret != s->fd) {
        Error *local_err = NULL;

        /*
         * Fail already check_perm() if we can't get a working O_DIRECT
         * alignment with the new fd.
         */
        raw_probe_alignment(bs, ret, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Remember the new fd and flags until commit/abort. */
        s->perm_change_fd = ret;
        s->perm_change_flags = open_flags;
    }

    /* Prepare permissions on old fd to avoid conflicts between old and new,
     * but keep everything locked that new will need. */
    ret = raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, errp);
    if (ret < 0) {
        goto fail;
    }

    /* Copy locks to the new fd */
    if (s->perm_change_fd && s->use_lock) {
        ret = raw_apply_lock_bytes(NULL, s->perm_change_fd, perm, ~shared,
                                   false, errp);
        if (ret < 0) {
            raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
            goto fail;
        }
    }
    return 0;

fail:
    /* Undo: close the provisional fd, if any, so no resource leaks. */
    if (s->perm_change_fd) {
        qemu_close(s->perm_change_fd);
    }
    s->perm_change_fd = 0;
    return ret;
}
3797244a5668SFam Zheng
/*
 * .bdrv_set_perm: commit a permission change prepared by raw_check_perm().
 * Switches to the replacement fd (if one was opened), commits the file
 * locks, and records the new permission masks.
 */
static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared)
{
    BDRVRawState *s = bs->opaque;

    /* For reopen, we have already switched to the new fd (.bdrv_set_perm is
     * called after .bdrv_reopen_commit) */
    if (s->perm_change_fd && s->fd != s->perm_change_fd) {
        qemu_close(s->fd);
        s->fd = s->perm_change_fd;
        s->open_flags = s->perm_change_flags;
    }
    s->perm_change_fd = 0;

    raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL);
    s->perm = perm;
    s->shared_perm = shared;
}
3815244a5668SFam Zheng
/*
 * .bdrv_abort_perm_update: roll back a permission change prepared by
 * raw_check_perm().  Closes the provisional fd (if any) and reverts the
 * file locks to their previous state.
 */
static void raw_abort_perm_update(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;

    /* For reopen, .bdrv_reopen_abort is called afterwards and will close
     * the file descriptor. */
    if (s->perm_change_fd) {
        qemu_close(s->perm_change_fd);
    }
    s->perm_change_fd = 0;

    raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
}
3829244a5668SFam Zheng
raw_co_copy_range_from(BlockDriverState * bs,BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags)3830742bf09bSEmanuele Giuseppe Esposito static int coroutine_fn GRAPH_RDLOCK raw_co_copy_range_from(
383148535049SVladimir Sementsov-Ogievskiy BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
383248535049SVladimir Sementsov-Ogievskiy BdrvChild *dst, int64_t dst_offset, int64_t bytes,
383367b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
38341efad060SFam Zheng {
383567b51fb9SVladimir Sementsov-Ogievskiy return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
383667b51fb9SVladimir Sementsov-Ogievskiy read_flags, write_flags);
38371efad060SFam Zheng }
38381efad060SFam Zheng
3839742bf09bSEmanuele Giuseppe Esposito static int coroutine_fn GRAPH_RDLOCK
raw_co_copy_range_to(BlockDriverState * bs,BdrvChild * src,int64_t src_offset,BdrvChild * dst,int64_t dst_offset,int64_t bytes,BdrvRequestFlags read_flags,BdrvRequestFlags write_flags)3840742bf09bSEmanuele Giuseppe Esposito raw_co_copy_range_to(BlockDriverState *bs,
3841742bf09bSEmanuele Giuseppe Esposito BdrvChild *src, int64_t src_offset,
3842742bf09bSEmanuele Giuseppe Esposito BdrvChild *dst, int64_t dst_offset,
3843742bf09bSEmanuele Giuseppe Esposito int64_t bytes, BdrvRequestFlags read_flags,
384467b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags write_flags)
38451efad060SFam Zheng {
384658a209c4SKevin Wolf RawPosixAIOData acb;
38471efad060SFam Zheng BDRVRawState *s = bs->opaque;
38481efad060SFam Zheng BDRVRawState *src_s;
38491efad060SFam Zheng
38501efad060SFam Zheng assert(dst->bs == bs);
38511efad060SFam Zheng if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) {
38521efad060SFam Zheng return -ENOTSUP;
38531efad060SFam Zheng }
38541efad060SFam Zheng
38551efad060SFam Zheng src_s = src->bs->opaque;
38569f850f67SFam Zheng if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) {
38571efad060SFam Zheng return -EIO;
38581efad060SFam Zheng }
385958a209c4SKevin Wolf
386058a209c4SKevin Wolf acb = (RawPosixAIOData) {
386158a209c4SKevin Wolf .bs = bs,
386258a209c4SKevin Wolf .aio_type = QEMU_AIO_COPY_RANGE,
386358a209c4SKevin Wolf .aio_fildes = src_s->fd,
386458a209c4SKevin Wolf .aio_offset = src_offset,
386558a209c4SKevin Wolf .aio_nbytes = bytes,
386658a209c4SKevin Wolf .copy_range = {
386758a209c4SKevin Wolf .aio_fd2 = s->fd,
386858a209c4SKevin Wolf .aio_offset2 = dst_offset,
386958a209c4SKevin Wolf },
387058a209c4SKevin Wolf };
387158a209c4SKevin Wolf
38720fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_copy_range, &acb);
38731efad060SFam Zheng }
38741efad060SFam Zheng
/* Driver table for the "file" protocol: raw POSIX regular-file access. */
BlockDriver bdrv_file = {
    .format_name = "file",
    .protocol_name = "file",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe = NULL, /* no probe for protocols */
    .bdrv_parse_filename = raw_parse_filename,
    .bdrv_file_open = raw_open,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
    .bdrv_reopen_abort = raw_reopen_abort,
    .bdrv_close = raw_close,
    .bdrv_co_create = raw_co_create,
    .bdrv_co_create_opts = raw_co_create_opts,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_block_status = raw_co_block_status,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
    .bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes,
    .bdrv_co_delete_file = raw_co_delete_file,

    /* Coroutine I/O entry points */
    .bdrv_co_preadv = raw_co_preadv,
    .bdrv_co_pwritev = raw_co_pwritev,
    .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
    .bdrv_co_pdiscard = raw_co_pdiscard,
    .bdrv_co_copy_range_from = raw_co_copy_range_from,
    .bdrv_co_copy_range_to = raw_co_copy_range_to,
    .bdrv_refresh_limits = raw_refresh_limits,

    /* Sizing, info and permission management */
    .bdrv_co_truncate = raw_co_truncate,
    .bdrv_co_getlength = raw_co_getlength,
    .bdrv_co_get_info = raw_co_get_info,
    .bdrv_get_specific_info = raw_get_specific_info,
    .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
    .bdrv_get_specific_stats = raw_get_specific_stats,
    .bdrv_check_perm = raw_check_perm,
    .bdrv_set_perm = raw_set_perm,
    .bdrv_abort_perm_update = raw_abort_perm_update,
    .create_opts = &raw_create_opts,
    .mutable_opts = mutable_opts,
};
3915c1bb86cdSEric Blake
3916c1bb86cdSEric Blake /***********************************************/
3917c1bb86cdSEric Blake /* host device */
3918c1bb86cdSEric Blake
391914176c8dSJoelle van Dyne #if defined(HAVE_HOST_BLOCK_DEVICE)
392014176c8dSJoelle van Dyne
3921c1bb86cdSEric Blake #if defined(__APPLE__) && defined(__MACH__)
3922c1bb86cdSEric Blake static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
3923c1bb86cdSEric Blake CFIndex maxPathSize, int flags);
3924aa44d3f6SPhilippe Mathieu-Daudé
3925aa44d3f6SPhilippe Mathieu-Daudé #if !defined(MAC_OS_VERSION_12_0) \
3926aa44d3f6SPhilippe Mathieu-Daudé || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0)
3927aa44d3f6SPhilippe Mathieu-Daudé #define IOMainPort IOMasterPort
3928aa44d3f6SPhilippe Mathieu-Daudé #endif
3929aa44d3f6SPhilippe Mathieu-Daudé
/*
 * Scan IOKit for an ejectable optical medium (DVD first, then CD).
 * On a match, *mediaIterator holds the service iterator and the matched
 * IOKit media class name is returned (caller must g_free() it).
 * Returns NULL if no ejectable optical medium is found.
 */
static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator)
{
    const char *candidates[] = {kIODVDMediaClass, kIOCDMediaClass};
    char *matched_class = NULL;
    mach_port_t mainPort;
    kern_return_t kr;
    int i;

    kr = IOMainPort(MACH_PORT_NULL, &mainPort);
    if (kr != KERN_SUCCESS) {
        printf("IOMainPort returned %d\n", kr);
    }

    for (i = 0; i < ARRAY_SIZE(candidates); i++) {
        CFMutableDictionaryRef match = IOServiceMatching(candidates[i]);

        if (!match) {
            error_report("IOServiceMatching returned NULL for %s",
                         candidates[i]);
            continue;
        }
        /* only consider media the OS reports as ejectable */
        CFDictionarySetValue(match, CFSTR(kIOMediaEjectableKey),
                             kCFBooleanTrue);
        /* consumes 'match', so no release needed here */
        kr = IOServiceGetMatchingServices(mainPort, match, mediaIterator);
        if (kr != KERN_SUCCESS) {
            error_report("Note: IOServiceGetMatchingServices returned %d",
                         kr);
            continue;
        }

        /* If a match was found, leave the loop */
        if (*mediaIterator != 0) {
            trace_file_FindEjectableOpticalMedia(candidates[i]);
            matched_class = g_strdup(candidates[i]);
            break;
        }
    }
    return matched_class;
}
3970c1bb86cdSEric Blake
GetBSDPath(io_iterator_t mediaIterator,char * bsdPath,CFIndex maxPathSize,int flags)3971c1bb86cdSEric Blake kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
3972c1bb86cdSEric Blake CFIndex maxPathSize, int flags)
3973c1bb86cdSEric Blake {
3974c1bb86cdSEric Blake io_object_t nextMedia;
3975c1bb86cdSEric Blake kern_return_t kernResult = KERN_FAILURE;
3976c1bb86cdSEric Blake *bsdPath = '\0';
3977c1bb86cdSEric Blake nextMedia = IOIteratorNext( mediaIterator );
3978c1bb86cdSEric Blake if ( nextMedia )
3979c1bb86cdSEric Blake {
3980c1bb86cdSEric Blake CFTypeRef bsdPathAsCFString;
3981c1bb86cdSEric Blake bsdPathAsCFString = IORegistryEntryCreateCFProperty( nextMedia, CFSTR( kIOBSDNameKey ), kCFAllocatorDefault, 0 );
3982c1bb86cdSEric Blake if ( bsdPathAsCFString ) {
3983c1bb86cdSEric Blake size_t devPathLength;
3984c1bb86cdSEric Blake strcpy( bsdPath, _PATH_DEV );
3985c1bb86cdSEric Blake if (flags & BDRV_O_NOCACHE) {
3986c1bb86cdSEric Blake strcat(bsdPath, "r");
3987c1bb86cdSEric Blake }
3988c1bb86cdSEric Blake devPathLength = strlen( bsdPath );
3989c1bb86cdSEric Blake if ( CFStringGetCString( bsdPathAsCFString, bsdPath + devPathLength, maxPathSize - devPathLength, kCFStringEncodingASCII ) ) {
3990c1bb86cdSEric Blake kernResult = KERN_SUCCESS;
3991c1bb86cdSEric Blake }
3992c1bb86cdSEric Blake CFRelease( bsdPathAsCFString );
3993c1bb86cdSEric Blake }
3994c1bb86cdSEric Blake IOObjectRelease( nextMedia );
3995c1bb86cdSEric Blake }
3996c1bb86cdSEric Blake
3997c1bb86cdSEric Blake return kernResult;
3998c1bb86cdSEric Blake }
3999c1bb86cdSEric Blake
4000c1bb86cdSEric Blake /* Sets up a real cdrom for use in QEMU */
setup_cdrom(char * bsd_path,Error ** errp)4001c1bb86cdSEric Blake static bool setup_cdrom(char *bsd_path, Error **errp)
4002c1bb86cdSEric Blake {
4003c1bb86cdSEric Blake int index, num_of_test_partitions = 2, fd;
4004c1bb86cdSEric Blake char test_partition[MAXPATHLEN];
4005c1bb86cdSEric Blake bool partition_found = false;
4006c1bb86cdSEric Blake
4007c1bb86cdSEric Blake /* look for a working partition */
4008c1bb86cdSEric Blake for (index = 0; index < num_of_test_partitions; index++) {
4009c1bb86cdSEric Blake snprintf(test_partition, sizeof(test_partition), "%ss%d", bsd_path,
4010c1bb86cdSEric Blake index);
4011b18a24a9SDaniel P. Berrangé fd = qemu_open(test_partition, O_RDONLY | O_BINARY | O_LARGEFILE, NULL);
4012c1bb86cdSEric Blake if (fd >= 0) {
4013c1bb86cdSEric Blake partition_found = true;
4014c1bb86cdSEric Blake qemu_close(fd);
4015c1bb86cdSEric Blake break;
4016c1bb86cdSEric Blake }
4017c1bb86cdSEric Blake }
4018c1bb86cdSEric Blake
4019c1bb86cdSEric Blake /* if a working partition on the device was not found */
4020c1bb86cdSEric Blake if (partition_found == false) {
4021c1bb86cdSEric Blake error_setg(errp, "Failed to find a working partition on disc");
4022c1bb86cdSEric Blake } else {
40234f7d28d7SLaurent Vivier trace_file_setup_cdrom(test_partition);
4024c1bb86cdSEric Blake pstrcpy(bsd_path, MAXPATHLEN, test_partition);
4025c1bb86cdSEric Blake }
4026c1bb86cdSEric Blake return partition_found;
4027c1bb86cdSEric Blake }
4028c1bb86cdSEric Blake
4029c1bb86cdSEric Blake /* Prints directions on mounting and unmounting a device */
/* Prints directions on mounting and unmounting a device */
static void print_unmounting_directions(const char *file_name)
{
    /* Emitted when a device open fails, most likely because macOS has the
     * volume auto-mounted; tell the user how to release it. */
    error_report("If device %s is mounted on the desktop, unmount"
                 " it first before using it in QEMU", file_name);
    error_report("Command to unmount device: diskutil unmountDisk %s",
                 file_name);
    error_report("Command to mount device: diskutil mountDisk %s", file_name);
}
4038c1bb86cdSEric Blake
4039c1bb86cdSEric Blake #endif /* defined(__APPLE__) && defined(__MACH__) */
4040c1bb86cdSEric Blake
hdev_probe_device(const char * filename)4041c1bb86cdSEric Blake static int hdev_probe_device(const char *filename)
4042c1bb86cdSEric Blake {
4043c1bb86cdSEric Blake struct stat st;
4044c1bb86cdSEric Blake
4045c1bb86cdSEric Blake /* allow a dedicated CD-ROM driver to match with a higher priority */
4046c1bb86cdSEric Blake if (strstart(filename, "/dev/cdrom", NULL))
4047c1bb86cdSEric Blake return 50;
4048c1bb86cdSEric Blake
4049c1bb86cdSEric Blake if (stat(filename, &st) >= 0 &&
4050c1bb86cdSEric Blake (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
4051c1bb86cdSEric Blake return 100;
4052c1bb86cdSEric Blake }
4053c1bb86cdSEric Blake
4054c1bb86cdSEric Blake return 0;
4055c1bb86cdSEric Blake }
4056c1bb86cdSEric Blake
static void hdev_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    /* Strip the optional "host_device:" prefix and store the remainder
     * as the "filename" option. */
    bdrv_parse_filename_strip_prefix(filename, "host_device:", options);
}
4062c1bb86cdSEric Blake
hdev_is_sg(BlockDriverState * bs)4063c1bb86cdSEric Blake static bool hdev_is_sg(BlockDriverState *bs)
4064c1bb86cdSEric Blake {
4065c1bb86cdSEric Blake
4066c1bb86cdSEric Blake #if defined(__linux__)
4067c1bb86cdSEric Blake
4068c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4069c1bb86cdSEric Blake struct stat st;
4070c1bb86cdSEric Blake struct sg_scsi_id scsiid;
4071c1bb86cdSEric Blake int sg_version;
4072c1bb86cdSEric Blake int ret;
4073c1bb86cdSEric Blake
4074c1bb86cdSEric Blake if (stat(bs->filename, &st) < 0 || !S_ISCHR(st.st_mode)) {
4075c1bb86cdSEric Blake return false;
4076c1bb86cdSEric Blake }
4077c1bb86cdSEric Blake
4078c1bb86cdSEric Blake ret = ioctl(s->fd, SG_GET_VERSION_NUM, &sg_version);
4079c1bb86cdSEric Blake if (ret < 0) {
4080c1bb86cdSEric Blake return false;
4081c1bb86cdSEric Blake }
4082c1bb86cdSEric Blake
4083c1bb86cdSEric Blake ret = ioctl(s->fd, SG_GET_SCSI_ID, &scsiid);
4084c1bb86cdSEric Blake if (ret >= 0) {
40854f7d28d7SLaurent Vivier trace_file_hdev_is_sg(scsiid.scsi_type, sg_version);
4086c1bb86cdSEric Blake return true;
4087c1bb86cdSEric Blake }
4088c1bb86cdSEric Blake
4089c1bb86cdSEric Blake #endif
4090c1bb86cdSEric Blake
4091c1bb86cdSEric Blake return false;
4092c1bb86cdSEric Blake }
4093c1bb86cdSEric Blake
/*
 * Open a host block/character device.  On macOS, "/dev/cdrom" is special:
 * the real optical drive is located via IOKit and the "filename" option is
 * rewritten to its BSD device path before the common open path runs.
 * Returns 0 on success, negative errno on failure.
 */
static int hdev_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

#if defined(__APPLE__) && defined(__MACH__)
    /*
     * Caution: while qdict_get_str() is fine, getting non-string types
     * would require more care. When @options come from -blockdev or
     * blockdev_add, its members are typed according to the QAPI
     * schema, but when they come from -drive, they're all QString.
     */
    const char *filename = qdict_get_str(options, "filename");
    char bsd_path[MAXPATHLEN] = "";
    bool error_occurred = false;

    /* If using a real cdrom */
    if (strcmp(filename, "/dev/cdrom") == 0) {
        char *mediaType = NULL;
        kern_return_t ret_val;
        io_iterator_t mediaIterator = 0;

        mediaType = FindEjectableOpticalMedia(&mediaIterator);
        if (mediaType == NULL) {
            error_setg(errp, "Please make sure your CD/DVD is in the optical"
                       " drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags);
        if (ret_val != KERN_SUCCESS) {
            error_setg(errp, "Could not get BSD path for optical drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /* If a real optical drive was not found */
        if (bsd_path[0] == '\0') {
            error_setg(errp, "Failed to obtain bsd path for optical drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /* If using a cdrom disc and finding a partition on the disc failed */
        if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 &&
            setup_cdrom(bsd_path, errp) == false) {
            print_unmounting_directions(bsd_path);
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /* redirect the open below to the discovered BSD device node */
        qdict_put_str(options, "filename", bsd_path);

hdev_open_Mac_error:
        g_free(mediaType);
        if (mediaIterator) {
            IOObjectRelease(mediaIterator);
        }
        if (error_occurred) {
            return -ENOENT;
        }
    }
#endif /* defined(__APPLE__) && defined(__MACH__) */

    s->type = FTYPE_FILE;

    ret = raw_open_common(bs, options, flags, 0, true, errp);
    if (ret < 0) {
#if defined(__APPLE__) && defined(__MACH__)
        if (*bsd_path) {
            filename = bsd_path;
        }
        /* if a physical device experienced an error while being opened */
        if (strncmp(filename, "/dev/", 5) == 0) {
            print_unmounting_directions(filename);
        }
#endif /* defined(__APPLE__) && defined(__MACH__) */
        return ret;
    }

    /* Since this does ioctl the device must be already opened */
    bs->sg = hdev_is_sg(bs);

    return ret;
}
4181c1bb86cdSEric Blake
4182c1bb86cdSEric Blake #if defined(__linux__)
41832f3a7ab3SKevin Wolf static int coroutine_fn
hdev_co_ioctl(BlockDriverState * bs,unsigned long int req,void * buf)41842f3a7ab3SKevin Wolf hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
4185c1bb86cdSEric Blake {
4186c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
418703425671SKevin Wolf RawPosixAIOData acb;
41882f3a7ab3SKevin Wolf int ret;
4189c1bb86cdSEric Blake
41902f3a7ab3SKevin Wolf ret = fd_open(bs);
41912f3a7ab3SKevin Wolf if (ret < 0) {
41922f3a7ab3SKevin Wolf return ret;
41932f3a7ab3SKevin Wolf }
4194c1bb86cdSEric Blake
41957c9e5276SPaolo Bonzini if (req == SG_IO && s->pr_mgr) {
41967c9e5276SPaolo Bonzini struct sg_io_hdr *io_hdr = buf;
41977c9e5276SPaolo Bonzini if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
41987c9e5276SPaolo Bonzini io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
41990fdb7311SEmanuele Giuseppe Esposito return pr_manager_execute(s->pr_mgr, qemu_get_current_aio_context(),
42002f3a7ab3SKevin Wolf s->fd, io_hdr);
42017c9e5276SPaolo Bonzini }
42027c9e5276SPaolo Bonzini }
42037c9e5276SPaolo Bonzini
420403425671SKevin Wolf acb = (RawPosixAIOData) {
420503425671SKevin Wolf .bs = bs,
420603425671SKevin Wolf .aio_type = QEMU_AIO_IOCTL,
420703425671SKevin Wolf .aio_fildes = s->fd,
420803425671SKevin Wolf .aio_offset = 0,
420903425671SKevin Wolf .ioctl = {
421003425671SKevin Wolf .buf = buf,
421103425671SKevin Wolf .cmd = req,
421203425671SKevin Wolf },
421303425671SKevin Wolf };
421403425671SKevin Wolf
42150fdb7311SEmanuele Giuseppe Esposito return raw_thread_pool_submit(handle_aiocb_ioctl, &acb);
4216c1bb86cdSEric Blake }
4217c1bb86cdSEric Blake #endif /* linux */
4218c1bb86cdSEric Blake
421933d70fb6SKevin Wolf static coroutine_fn int
hdev_co_pdiscard(BlockDriverState * bs,int64_t offset,int64_t bytes)42200c802287SVladimir Sementsov-Ogievskiy hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
4221c1bb86cdSEric Blake {
42221c450366SAnton Nefedov BDRVRawState *s = bs->opaque;
422333d70fb6SKevin Wolf int ret;
4224c1bb86cdSEric Blake
422533d70fb6SKevin Wolf ret = fd_open(bs);
422633d70fb6SKevin Wolf if (ret < 0) {
42271c450366SAnton Nefedov raw_account_discard(s, bytes, ret);
422833d70fb6SKevin Wolf return ret;
4229c1bb86cdSEric Blake }
423046ee0f46SKevin Wolf return raw_do_pdiscard(bs, offset, bytes, true);
4231c1bb86cdSEric Blake }
4232c1bb86cdSEric Blake
hdev_co_pwrite_zeroes(BlockDriverState * bs,int64_t offset,int64_t bytes,BdrvRequestFlags flags)4233c1bb86cdSEric Blake static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
4234f34b2bcfSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, BdrvRequestFlags flags)
4235c1bb86cdSEric Blake {
4236c1bb86cdSEric Blake int rc;
4237c1bb86cdSEric Blake
4238c1bb86cdSEric Blake rc = fd_open(bs);
4239c1bb86cdSEric Blake if (rc < 0) {
4240c1bb86cdSEric Blake return rc;
4241c1bb86cdSEric Blake }
424234fa110eSKevin Wolf
42437154d8aeSKevin Wolf return raw_do_pwrite_zeroes(bs, offset, bytes, flags, true);
4244c1bb86cdSEric Blake }
4245c1bb86cdSEric Blake
4246c1bb86cdSEric Blake static BlockDriver bdrv_host_device = {
4247c1bb86cdSEric Blake .format_name = "host_device",
4248c1bb86cdSEric Blake .protocol_name = "host_device",
4249c1bb86cdSEric Blake .instance_size = sizeof(BDRVRawState),
4250c1bb86cdSEric Blake .bdrv_needs_filename = true,
4251c1bb86cdSEric Blake .bdrv_probe_device = hdev_probe_device,
4252c1bb86cdSEric Blake .bdrv_parse_filename = hdev_parse_filename,
4253c1bb86cdSEric Blake .bdrv_file_open = hdev_open,
4254c1bb86cdSEric Blake .bdrv_close = raw_close,
4255c1bb86cdSEric Blake .bdrv_reopen_prepare = raw_reopen_prepare,
4256c1bb86cdSEric Blake .bdrv_reopen_commit = raw_reopen_commit,
4257c1bb86cdSEric Blake .bdrv_reopen_abort = raw_reopen_abort,
42585a5e7f8cSMaxim Levitsky .bdrv_co_create_opts = bdrv_co_create_opts_simple,
42595a5e7f8cSMaxim Levitsky .create_opts = &bdrv_create_opts_simple,
42608a2ce0bcSAlberto Garcia .mutable_opts = mutable_opts,
4261dd577a26SStefan Hajnoczi .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
4262c1bb86cdSEric Blake .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes,
4263c1bb86cdSEric Blake
4264c1bb86cdSEric Blake .bdrv_co_preadv = raw_co_preadv,
4265c1bb86cdSEric Blake .bdrv_co_pwritev = raw_co_pwritev,
426633d70fb6SKevin Wolf .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
426733d70fb6SKevin Wolf .bdrv_co_pdiscard = hdev_co_pdiscard,
42681efad060SFam Zheng .bdrv_co_copy_range_from = raw_co_copy_range_from,
42691efad060SFam Zheng .bdrv_co_copy_range_to = raw_co_copy_range_to,
4270c1bb86cdSEric Blake .bdrv_refresh_limits = raw_refresh_limits,
4271c1bb86cdSEric Blake
4272061ca8a3SKevin Wolf .bdrv_co_truncate = raw_co_truncate,
4273c86422c5SEmanuele Giuseppe Esposito .bdrv_co_getlength = raw_co_getlength,
42743d47eb0aSEmanuele Giuseppe Esposito .bdrv_co_get_info = raw_co_get_info,
42757f36a50aSHanna Reitz .bdrv_get_specific_info = raw_get_specific_info,
427682618d7bSEmanuele Giuseppe Esposito .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
4277d9245599SAnton Nefedov .bdrv_get_specific_stats = hdev_get_specific_stats,
4278244a5668SFam Zheng .bdrv_check_perm = raw_check_perm,
4279244a5668SFam Zheng .bdrv_set_perm = raw_set_perm,
4280244a5668SFam Zheng .bdrv_abort_perm_update = raw_abort_perm_update,
4281c1bb86cdSEric Blake .bdrv_probe_blocksizes = hdev_probe_blocksizes,
4282c1bb86cdSEric Blake .bdrv_probe_geometry = hdev_probe_geometry,
4283c1bb86cdSEric Blake
4284c1bb86cdSEric Blake /* generic scsi device */
4285c1bb86cdSEric Blake #ifdef __linux__
42862f3a7ab3SKevin Wolf .bdrv_co_ioctl = hdev_co_ioctl,
4287c1bb86cdSEric Blake #endif
42886d43eaa3SSam Li
42896d43eaa3SSam Li /* zoned device */
42906d43eaa3SSam Li #if defined(CONFIG_BLKZONED)
42916d43eaa3SSam Li /* zone management operations */
42926d43eaa3SSam Li .bdrv_co_zone_report = raw_co_zone_report,
42936d43eaa3SSam Li .bdrv_co_zone_mgmt = raw_co_zone_mgmt,
42944751d09aSSam Li .bdrv_co_zone_append = raw_co_zone_append,
42956d43eaa3SSam Li #endif
4296c1bb86cdSEric Blake };
4297c1bb86cdSEric Blake
4298c1bb86cdSEric Blake #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
static void cdrom_parse_filename(const char *filename, QDict *options,
                                 Error **errp)
{
    /* Strip the optional "host_cdrom:" prefix and store the remainder
     * as the "filename" option. */
    bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options);
}
43048c6f27e7SPaolo Bonzini
static void cdrom_refresh_limits(BlockDriverState *bs, Error **errp)
{
    /* Removable media: the device length changes on disc swap, so mark
     * it variable before applying the common raw limits. */
    bs->bl.has_variable_length = true;
    raw_refresh_limits(bs, errp);
}
4310c1bb86cdSEric Blake #endif
4311c1bb86cdSEric Blake
4312c1bb86cdSEric Blake #ifdef __linux__
/* Open a Linux host CD-ROM device node. */
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVRawState *s = bs->opaque;

    s->type = FTYPE_CD;

    /* open will not fail even if no CD is inserted, so add O_NONBLOCK */
    return raw_open_common(bs, options, flags, O_NONBLOCK, true, errp);
}
4323c1bb86cdSEric Blake
cdrom_probe_device(const char * filename)4324c1bb86cdSEric Blake static int cdrom_probe_device(const char *filename)
4325c1bb86cdSEric Blake {
4326c1bb86cdSEric Blake int fd, ret;
4327c1bb86cdSEric Blake int prio = 0;
4328c1bb86cdSEric Blake struct stat st;
4329c1bb86cdSEric Blake
4330b18a24a9SDaniel P. Berrangé fd = qemu_open(filename, O_RDONLY | O_NONBLOCK, NULL);
4331c1bb86cdSEric Blake if (fd < 0) {
4332c1bb86cdSEric Blake goto out;
4333c1bb86cdSEric Blake }
4334c1bb86cdSEric Blake ret = fstat(fd, &st);
4335c1bb86cdSEric Blake if (ret == -1 || !S_ISBLK(st.st_mode)) {
4336c1bb86cdSEric Blake goto outc;
4337c1bb86cdSEric Blake }
4338c1bb86cdSEric Blake
4339c1bb86cdSEric Blake /* Attempt to detect via a CDROM specific ioctl */
4340c1bb86cdSEric Blake ret = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
4341c1bb86cdSEric Blake if (ret >= 0)
4342c1bb86cdSEric Blake prio = 100;
4343c1bb86cdSEric Blake
4344c1bb86cdSEric Blake outc:
4345c1bb86cdSEric Blake qemu_close(fd);
4346c1bb86cdSEric Blake out:
4347c1bb86cdSEric Blake return prio;
4348c1bb86cdSEric Blake }
4349c1bb86cdSEric Blake
cdrom_co_is_inserted(BlockDriverState * bs)43501e97be91SEmanuele Giuseppe Esposito static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
4351c1bb86cdSEric Blake {
4352c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4353c1bb86cdSEric Blake int ret;
4354c1bb86cdSEric Blake
4355c1bb86cdSEric Blake ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
4356c1bb86cdSEric Blake return ret == CDS_DISC_OK;
4357c1bb86cdSEric Blake }
4358c1bb86cdSEric Blake
cdrom_co_eject(BlockDriverState * bs,bool eject_flag)43592531b390SEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)
4360c1bb86cdSEric Blake {
4361c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4362c1bb86cdSEric Blake
4363c1bb86cdSEric Blake if (eject_flag) {
4364c1bb86cdSEric Blake if (ioctl(s->fd, CDROMEJECT, NULL) < 0)
4365c1bb86cdSEric Blake perror("CDROMEJECT");
4366c1bb86cdSEric Blake } else {
4367c1bb86cdSEric Blake if (ioctl(s->fd, CDROMCLOSETRAY, NULL) < 0)
4368c1bb86cdSEric Blake perror("CDROMEJECT");
4369c1bb86cdSEric Blake }
4370c1bb86cdSEric Blake }
4371c1bb86cdSEric Blake
cdrom_co_lock_medium(BlockDriverState * bs,bool locked)43722c75261cSEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked)
4373c1bb86cdSEric Blake {
4374c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4375c1bb86cdSEric Blake
4376c1bb86cdSEric Blake if (ioctl(s->fd, CDROM_LOCKDOOR, locked) < 0) {
4377c1bb86cdSEric Blake /*
4378c1bb86cdSEric Blake * Note: an error can happen if the distribution automatically
4379c1bb86cdSEric Blake * mounts the CD-ROM
4380c1bb86cdSEric Blake */
4381c1bb86cdSEric Blake /* perror("CDROM_LOCKDOOR"); */
4382c1bb86cdSEric Blake }
4383c1bb86cdSEric Blake }
4384c1bb86cdSEric Blake
4385c1bb86cdSEric Blake static BlockDriver bdrv_host_cdrom = {
4386c1bb86cdSEric Blake .format_name = "host_cdrom",
4387c1bb86cdSEric Blake .protocol_name = "host_cdrom",
4388c1bb86cdSEric Blake .instance_size = sizeof(BDRVRawState),
4389c1bb86cdSEric Blake .bdrv_needs_filename = true,
4390c1bb86cdSEric Blake .bdrv_probe_device = cdrom_probe_device,
4391c1bb86cdSEric Blake .bdrv_parse_filename = cdrom_parse_filename,
4392c1bb86cdSEric Blake .bdrv_file_open = cdrom_open,
4393c1bb86cdSEric Blake .bdrv_close = raw_close,
4394c1bb86cdSEric Blake .bdrv_reopen_prepare = raw_reopen_prepare,
4395c1bb86cdSEric Blake .bdrv_reopen_commit = raw_reopen_commit,
4396c1bb86cdSEric Blake .bdrv_reopen_abort = raw_reopen_abort,
43975a5e7f8cSMaxim Levitsky .bdrv_co_create_opts = bdrv_co_create_opts_simple,
43985a5e7f8cSMaxim Levitsky .create_opts = &bdrv_create_opts_simple,
43998a2ce0bcSAlberto Garcia .mutable_opts = mutable_opts,
4400dd577a26SStefan Hajnoczi .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
4401c1bb86cdSEric Blake
4402c1bb86cdSEric Blake .bdrv_co_preadv = raw_co_preadv,
4403c1bb86cdSEric Blake .bdrv_co_pwritev = raw_co_pwritev,
440433d70fb6SKevin Wolf .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
44058c6f27e7SPaolo Bonzini .bdrv_refresh_limits = cdrom_refresh_limits,
4406c1bb86cdSEric Blake
4407061ca8a3SKevin Wolf .bdrv_co_truncate = raw_co_truncate,
4408c86422c5SEmanuele Giuseppe Esposito .bdrv_co_getlength = raw_co_getlength,
440982618d7bSEmanuele Giuseppe Esposito .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
4410c1bb86cdSEric Blake
4411c1bb86cdSEric Blake /* removable device support */
44121e97be91SEmanuele Giuseppe Esposito .bdrv_co_is_inserted = cdrom_co_is_inserted,
44132531b390SEmanuele Giuseppe Esposito .bdrv_co_eject = cdrom_co_eject,
44142c75261cSEmanuele Giuseppe Esposito .bdrv_co_lock_medium = cdrom_co_lock_medium,
4415c1bb86cdSEric Blake
4416c1bb86cdSEric Blake /* generic scsi device */
44172f3a7ab3SKevin Wolf .bdrv_co_ioctl = hdev_co_ioctl,
4418c1bb86cdSEric Blake };
4419c1bb86cdSEric Blake #endif /* __linux__ */
4420c1bb86cdSEric Blake
4421c1bb86cdSEric Blake #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
/* Open a FreeBSD host CD-ROM device node. */
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    s->type = FTYPE_CD;

    ret = raw_open_common(bs, options, flags, 0, true, errp);
    if (ret) {
        return ret;
    }

    /* make sure the door isn't locked at this time */
    ioctl(s->fd, CDIOCALLOW);
    return 0;
}
4439c1bb86cdSEric Blake
cdrom_probe_device(const char * filename)4440c1bb86cdSEric Blake static int cdrom_probe_device(const char *filename)
4441c1bb86cdSEric Blake {
4442c1bb86cdSEric Blake if (strstart(filename, "/dev/cd", NULL) ||
4443c1bb86cdSEric Blake strstart(filename, "/dev/acd", NULL))
4444c1bb86cdSEric Blake return 100;
4445c1bb86cdSEric Blake return 0;
4446c1bb86cdSEric Blake }
4447c1bb86cdSEric Blake
/*
 * Close and reopen the CD device.
 *
 * Force reread of possibly changed/newly loaded disc,
 * FreeBSD seems to not notice sometimes...
 */
static int cdrom_reopen(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int fd;

    if (s->fd >= 0) {
        qemu_close(s->fd);
    }

    fd = qemu_open(bs->filename, s->open_flags, NULL);
    if (fd < 0) {
        /* leave the state consistent: no open fd */
        s->fd = -1;
        return -EIO;
    }
    s->fd = fd;

    /* make sure the door isn't locked at this time */
    ioctl(s->fd, CDIOCALLOW);
    return 0;
}
4470c1bb86cdSEric Blake
cdrom_co_is_inserted(BlockDriverState * bs)44711e97be91SEmanuele Giuseppe Esposito static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
4472c1bb86cdSEric Blake {
447336c6c877SPaolo Bonzini return raw_getlength(bs) > 0;
4474c1bb86cdSEric Blake }
4475c1bb86cdSEric Blake
cdrom_co_eject(BlockDriverState * bs,bool eject_flag)44762531b390SEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)
4477c1bb86cdSEric Blake {
4478c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4479c1bb86cdSEric Blake
4480c1bb86cdSEric Blake if (s->fd < 0)
4481c1bb86cdSEric Blake return;
4482c1bb86cdSEric Blake
4483c1bb86cdSEric Blake (void) ioctl(s->fd, CDIOCALLOW);
4484c1bb86cdSEric Blake
4485c1bb86cdSEric Blake if (eject_flag) {
4486c1bb86cdSEric Blake if (ioctl(s->fd, CDIOCEJECT) < 0)
4487c1bb86cdSEric Blake perror("CDIOCEJECT");
4488c1bb86cdSEric Blake } else {
4489c1bb86cdSEric Blake if (ioctl(s->fd, CDIOCCLOSE) < 0)
4490c1bb86cdSEric Blake perror("CDIOCCLOSE");
4491c1bb86cdSEric Blake }
4492c1bb86cdSEric Blake
4493c1bb86cdSEric Blake cdrom_reopen(bs);
4494c1bb86cdSEric Blake }
4495c1bb86cdSEric Blake
cdrom_co_lock_medium(BlockDriverState * bs,bool locked)44962c75261cSEmanuele Giuseppe Esposito static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked)
4497c1bb86cdSEric Blake {
4498c1bb86cdSEric Blake BDRVRawState *s = bs->opaque;
4499c1bb86cdSEric Blake
4500c1bb86cdSEric Blake if (s->fd < 0)
4501c1bb86cdSEric Blake return;
4502c1bb86cdSEric Blake if (ioctl(s->fd, (locked ? CDIOCPREVENT : CDIOCALLOW)) < 0) {
4503c1bb86cdSEric Blake /*
4504c1bb86cdSEric Blake * Note: an error can happen if the distribution automatically
4505c1bb86cdSEric Blake * mounts the CD-ROM
4506c1bb86cdSEric Blake */
4507c1bb86cdSEric Blake /* perror("CDROM_LOCKDOOR"); */
4508c1bb86cdSEric Blake }
4509c1bb86cdSEric Blake }
4510c1bb86cdSEric Blake
4511c1bb86cdSEric Blake static BlockDriver bdrv_host_cdrom = {
4512c1bb86cdSEric Blake .format_name = "host_cdrom",
4513c1bb86cdSEric Blake .protocol_name = "host_cdrom",
4514c1bb86cdSEric Blake .instance_size = sizeof(BDRVRawState),
4515c1bb86cdSEric Blake .bdrv_needs_filename = true,
4516c1bb86cdSEric Blake .bdrv_probe_device = cdrom_probe_device,
4517c1bb86cdSEric Blake .bdrv_parse_filename = cdrom_parse_filename,
4518c1bb86cdSEric Blake .bdrv_file_open = cdrom_open,
4519c1bb86cdSEric Blake .bdrv_close = raw_close,
4520c1bb86cdSEric Blake .bdrv_reopen_prepare = raw_reopen_prepare,
4521c1bb86cdSEric Blake .bdrv_reopen_commit = raw_reopen_commit,
4522c1bb86cdSEric Blake .bdrv_reopen_abort = raw_reopen_abort,
45235a5e7f8cSMaxim Levitsky .bdrv_co_create_opts = bdrv_co_create_opts_simple,
45245a5e7f8cSMaxim Levitsky .create_opts = &bdrv_create_opts_simple,
45258a2ce0bcSAlberto Garcia .mutable_opts = mutable_opts,
4526c1bb86cdSEric Blake
4527c1bb86cdSEric Blake .bdrv_co_preadv = raw_co_preadv,
4528c1bb86cdSEric Blake .bdrv_co_pwritev = raw_co_pwritev,
452933d70fb6SKevin Wolf .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
45308c6f27e7SPaolo Bonzini .bdrv_refresh_limits = cdrom_refresh_limits,
4531c1bb86cdSEric Blake
4532061ca8a3SKevin Wolf .bdrv_co_truncate = raw_co_truncate,
4533c86422c5SEmanuele Giuseppe Esposito .bdrv_co_getlength = raw_co_getlength,
453482618d7bSEmanuele Giuseppe Esposito .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
4535c1bb86cdSEric Blake
4536c1bb86cdSEric Blake /* removable device support */
45371e97be91SEmanuele Giuseppe Esposito .bdrv_co_is_inserted = cdrom_co_is_inserted,
45382531b390SEmanuele Giuseppe Esposito .bdrv_co_eject = cdrom_co_eject,
45392c75261cSEmanuele Giuseppe Esposito .bdrv_co_lock_medium = cdrom_co_lock_medium,
4540c1bb86cdSEric Blake };
4541c1bb86cdSEric Blake #endif /* __FreeBSD__ */
4542c1bb86cdSEric Blake
454314176c8dSJoelle van Dyne #endif /* HAVE_HOST_BLOCK_DEVICE */
454414176c8dSJoelle van Dyne
static void bdrv_file_init(void)
{
    /*
     * Register all the drivers. Note that order is important, the driver
     * registered last will get probed first.
     */
    bdrv_register(&bdrv_file);
#if defined(HAVE_HOST_BLOCK_DEVICE)
    bdrv_register(&bdrv_host_device);
#ifdef __linux__
    bdrv_register(&bdrv_host_cdrom);
#endif
    /* the FreeBSD variant of bdrv_host_cdrom, compiled above */
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    bdrv_register(&bdrv_host_cdrom);
#endif
#endif /* HAVE_HOST_BLOCK_DEVICE */
}
4562c1bb86cdSEric Blake
4563c1bb86cdSEric Blake block_init(bdrv_file_init);
4564