xref: /dragonfly/sys/vfs/hammer2/hammer2_ioctl.c (revision 97fa55c4)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  * Ioctl Functions.
37  *
38  * WARNING! The ioctl functions which manipulate the connection state need
39  *	    to be able to run without deadlock on the volume's chain lock.
40  *	    Most of these functions use a separate lock.
41  */
42 
43 #include "hammer2.h"
44 
45 static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
46 static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
47 static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
48 static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
49 static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
50 static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
51 static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
52 static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
53 static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
54 static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
55 static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
56 static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
57 static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
58 static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
59 static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
60 static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags);
61 //static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
62 //static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
63 //static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
64 static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
65 static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);
66 
/*
 * Top-level ioctl dispatcher for HAMMER2.
 *
 * The root-privilege check is performed once up front; its result is
 * deliberately IGNORED (overwritten) for the informational commands
 * VERSION_GET, INODE_GET, BULKFREE_SCAN/ASYNC and DEBUG_DUMP, which any
 * user with the descriptor open may issue.  All other commands only
 * dispatch when the priv check returned 0.
 *
 * Returns 0 on success, an errno on failure, EOPNOTSUPP for unknown
 * commands.
 */
int
hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
	      struct ucred *cred)
{
	int error;

	/*
	 * Standard root cred checks, will be selectively ignored below
	 * for ioctls that do not require root creds.
	 */
	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	switch(com) {
	case HAMMER2IOC_VERSION_GET:
		/* informational; no root creds required */
		error = hammer2_ioctl_version_get(ip, data);
		break;
	case HAMMER2IOC_RECLUSTER:
		if (error == 0)
			error = hammer2_ioctl_recluster(ip, data);
		break;
	case HAMMER2IOC_REMOTE_SCAN:
		if (error == 0)
			error = hammer2_ioctl_remote_scan(ip, data);
		break;
	case HAMMER2IOC_REMOTE_ADD:
		if (error == 0)
			error = hammer2_ioctl_remote_add(ip, data);
		break;
	case HAMMER2IOC_REMOTE_DEL:
		if (error == 0)
			error = hammer2_ioctl_remote_del(ip, data);
		break;
	case HAMMER2IOC_REMOTE_REP:
		if (error == 0)
			error = hammer2_ioctl_remote_rep(ip, data);
		break;
	case HAMMER2IOC_SOCKET_GET:
		if (error == 0)
			error = hammer2_ioctl_socket_get(ip, data);
		break;
	case HAMMER2IOC_SOCKET_SET:
		if (error == 0)
			error = hammer2_ioctl_socket_set(ip, data);
		break;
	case HAMMER2IOC_PFS_GET:
		if (error == 0)
			error = hammer2_ioctl_pfs_get(ip, data);
		break;
	case HAMMER2IOC_PFS_LOOKUP:
		if (error == 0)
			error = hammer2_ioctl_pfs_lookup(ip, data);
		break;
	case HAMMER2IOC_PFS_CREATE:
		if (error == 0)
			error = hammer2_ioctl_pfs_create(ip, data);
		break;
	case HAMMER2IOC_PFS_DELETE:
		if (error == 0)
			error = hammer2_ioctl_pfs_delete(ip, data);
		break;
	case HAMMER2IOC_PFS_SNAPSHOT:
		if (error == 0)
			error = hammer2_ioctl_pfs_snapshot(ip, data);
		break;
	case HAMMER2IOC_INODE_GET:
		/* informational; no root creds required */
		error = hammer2_ioctl_inode_get(ip, data);
		break;
	case HAMMER2IOC_INODE_SET:
		if (error == 0)
			error = hammer2_ioctl_inode_set(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_SCAN:
		/* no root creds required */
		error = hammer2_ioctl_bulkfree_scan(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_ASYNC:
		/* NULL bfi requests an asynchronous pass */
		error = hammer2_ioctl_bulkfree_scan(ip, NULL);
		break;
	/*case HAMMER2IOC_INODE_COMP_SET:
		error = hammer2_ioctl_inode_comp_set(ip, data);
		break;
	case HAMMER2IOC_INODE_COMP_REC_SET:
	 	error = hammer2_ioctl_inode_comp_rec_set(ip, data);
	 	break;
	case HAMMER2IOC_INODE_COMP_REC_SET2:
		error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
		break;*/
	case HAMMER2IOC_DESTROY:
		if (error == 0)
			error = hammer2_ioctl_destroy(ip, data);
		break;
	case HAMMER2IOC_DEBUG_DUMP:
		/* console diagnostic dump; no root creds required */
		error = hammer2_ioctl_debug_dump(ip, *(u_int *)data);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
166 
167 /*
168  * Retrieve version and basic info
169  */
170 static int
171 hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
172 {
173 	hammer2_ioc_version_t *version = data;
174 	hammer2_dev_t *hmp;
175 
176 	hmp = ip->pmp->pfs_hmps[0];
177 	if (hmp)
178 		version->version = hmp->voldata.version;
179 	else
180 		version->version = -1;
181 	return 0;
182 }
183 
/*
 * Reconnect the cluster communications socket.  recl->fd is a user
 * descriptor for the new connection.  Only single-chain local device
 * mounts are supported (nchains == 1 with a valid focus).
 *
 * NOTE(review): on the failure paths the file reference obtained via
 * holdfp() does not appear to be released here — presumably
 * hammer2_cluster_reconnect() takes ownership on the success path;
 * verify the EINVAL paths do not leak fp.
 */
static int
hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_recluster_t *recl = data;
	struct vnode *vproot;
	struct file *fp;
	hammer2_cluster_t *cluster;
	int error;

	/* hold the file behind the caller-supplied descriptor */
	fp = holdfp(curthread, recl->fd, -1);
	if (fp) {
		error = VFS_ROOT(ip->pmp->mp, &vproot);
		if (error == 0) {
			cluster = &ip->pmp->iroot->cluster;
			kprintf("reconnect to cluster: nc=%d focus=%p\n",
				cluster->nchains, cluster->focus);
			if (cluster->nchains != 1 || cluster->focus == NULL) {
				kprintf("not a local device mount\n");
				error = EINVAL;
			} else {
				hammer2_cluster_reconnect(cluster->focus->hmp,
							  fp);
				kprintf("ok\n");
				error = 0;
			}
			vput(vproot);
		}
	} else {
		error = EINVAL;
	}
	return error;
}
216 
217 /*
218  * Retrieve information about a remote
219  */
220 static int
221 hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
222 {
223 	hammer2_dev_t *hmp;
224 	hammer2_ioc_remote_t *remote = data;
225 	int copyid = remote->copyid;
226 
227 	hmp = ip->pmp->pfs_hmps[0];
228 	if (hmp == NULL)
229 		return (EINVAL);
230 
231 	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
232 		return (EINVAL);
233 
234 	hammer2_voldata_lock(hmp);
235 	remote->copy1 = hmp->voldata.copyinfo[copyid];
236 	hammer2_voldata_unlock(hmp);
237 
238 	/*
239 	 * Adjust nextid (GET only)
240 	 */
241 	while (++copyid < HAMMER2_COPYID_COUNT &&
242 	       hmp->voldata.copyinfo[copyid].copyid == 0) {
243 		;
244 	}
245 	if (copyid == HAMMER2_COPYID_COUNT)
246 		remote->nextid = -1;
247 	else
248 		remote->nextid = copyid;
249 
250 	return(0);
251 }
252 
253 /*
254  * Add new remote entry
255  */
256 static int
257 hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
258 {
259 	hammer2_ioc_remote_t *remote = data;
260 	hammer2_pfs_t *pmp = ip->pmp;
261 	hammer2_dev_t *hmp;
262 	int copyid = remote->copyid;
263 	int error = 0;
264 
265 	hmp = pmp->pfs_hmps[0];
266 	if (hmp == NULL)
267 		return (EINVAL);
268 	if (copyid >= HAMMER2_COPYID_COUNT)
269 		return (EINVAL);
270 
271 	hammer2_voldata_lock(hmp);
272 	if (copyid < 0) {
273 		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
274 			if (hmp->voldata.copyinfo[copyid].copyid == 0)
275 				break;
276 		}
277 		if (copyid == HAMMER2_COPYID_COUNT) {
278 			error = ENOSPC;
279 			goto failed;
280 		}
281 	}
282 	hammer2_voldata_modify(hmp);
283 	remote->copy1.copyid = copyid;
284 	hmp->voldata.copyinfo[copyid] = remote->copy1;
285 	hammer2_volconf_update(hmp, copyid);
286 failed:
287 	hammer2_voldata_unlock(hmp);
288 	return (error);
289 }
290 
291 /*
292  * Delete existing remote entry
293  */
294 static int
295 hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
296 {
297 	hammer2_ioc_remote_t *remote = data;
298 	hammer2_pfs_t *pmp = ip->pmp;
299 	hammer2_dev_t *hmp;
300 	int copyid = remote->copyid;
301 	int error = 0;
302 
303 	hmp = pmp->pfs_hmps[0];
304 	if (hmp == NULL)
305 		return (EINVAL);
306 	if (copyid >= HAMMER2_COPYID_COUNT)
307 		return (EINVAL);
308 	remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
309 	hammer2_voldata_lock(hmp);
310 	if (copyid < 0) {
311 		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
312 			if (hmp->voldata.copyinfo[copyid].copyid == 0)
313 				continue;
314 			if (strcmp(remote->copy1.path,
315 			    hmp->voldata.copyinfo[copyid].path) == 0) {
316 				break;
317 			}
318 		}
319 		if (copyid == HAMMER2_COPYID_COUNT) {
320 			error = ENOENT;
321 			goto failed;
322 		}
323 	}
324 	hammer2_voldata_modify(hmp);
325 	hmp->voldata.copyinfo[copyid].copyid = 0;
326 	hammer2_volconf_update(hmp, copyid);
327 failed:
328 	hammer2_voldata_unlock(hmp);
329 	return (error);
330 }
331 
332 /*
333  * Replace existing remote entry
334  */
335 static int
336 hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
337 {
338 	hammer2_ioc_remote_t *remote = data;
339 	hammer2_dev_t *hmp;
340 	int copyid = remote->copyid;
341 
342 	hmp = ip->pmp->pfs_hmps[0];
343 	if (hmp == NULL)
344 		return (EINVAL);
345 	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
346 		return (EINVAL);
347 
348 	hammer2_voldata_lock(hmp);
349 	hammer2_voldata_modify(hmp);
350 	/*hammer2_volconf_update(hmp, copyid);*/
351 	hammer2_voldata_unlock(hmp);
352 
353 	return(0);
354 }
355 
/*
 * Retrieve communications socket.
 *
 * Not implemented; always returns EOPNOTSUPP.
 */
static int
hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
{
	return (EOPNOTSUPP);
}
364 
365 /*
366  * Set communications socket for connection
367  */
368 static int
369 hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
370 {
371 	hammer2_ioc_remote_t *remote = data;
372 	hammer2_dev_t *hmp;
373 	int copyid = remote->copyid;
374 
375 	hmp = ip->pmp->pfs_hmps[0];
376 	if (hmp == NULL)
377 		return (EINVAL);
378 	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
379 		return (EINVAL);
380 
381 	hammer2_voldata_lock(hmp);
382 	hammer2_voldata_unlock(hmp);
383 
384 	return(0);
385 }
386 
/*
 * Used to scan and retrieve PFS information.  PFS's are directories under
 * the super-root.
 *
 * To scan PFSs pass name_key=0.  The function will scan for the next
 * PFS and set all fields, as well as set name_next to the next key.
 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
 *
 * To retrieve a particular PFS by key, specify the key but note that
 * the ioctl will return the lowest key >= specified_key, so the caller
 * must verify the key.
 *
 * To retrieve the PFS associated with the file descriptor, pass
 * name_key set to (hammer2_key_t)-1.
 */
static int
hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t save_key;
	int error;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	save_key = pfs->name_key;
	error = 0;

	/*
	 * Setup.  For the descriptor-relative case ((hammer2_key_t)-1)
	 * the mount's own iroot chain is used directly (parent stays
	 * NULL).  Otherwise iterate the super-root starting at
	 * pfs->name_key.
	 */
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_lock(ip->pmp->iroot, 0);
		parent = NULL;
		chain = hammer2_inode_chain(ip->pmp->iroot, 0,
					    HAMMER2_RESOLVE_ALWAYS |
					    HAMMER2_RESOLVE_SHARED);
	} else {
		hammer2_inode_lock(hmp->spmp->iroot, 0);
		parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
					    HAMMER2_RESOLVE_ALWAYS |
					    HAMMER2_RESOLVE_SHARED);
		chain = hammer2_chain_lookup(&parent, &key_next,
					    pfs->name_key, HAMMER2_KEY_MAX,
					    &error,
					    HAMMER2_LOOKUP_SHARED);
	}

	/*
	 * Locate next PFS, skipping any non-inode chains.  In the
	 * parent == NULL (descriptor-relative) case there is nothing to
	 * iterate, so a non-inode chain terminates the scan.
	 */
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
			break;
		if (parent == NULL) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
			chain = NULL;
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					    key_next, HAMMER2_KEY_MAX,
					    &error,
					    HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
		bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
		pfs->name[ripdata->meta.name_len] = 0;
		ripdata = NULL;	/* safety */

		/*
		 * Calculate name_next, if any.  We are only accessing
		 * chain->bref so we can ignore chain->error (if the key
		 * is used later it will error then).
		 */
		if (parent == NULL) {
			pfs->name_next = (hammer2_key_t)-1;
		} else {
			chain = hammer2_chain_next(&parent, chain, &key_next,
						    key_next, HAMMER2_KEY_MAX,
						    &error,
						    HAMMER2_LOOKUP_SHARED);
			if (chain)
				pfs->name_next = chain->bref.key;
			else
				pfs->name_next = (hammer2_key_t)-1;
		}
	} else {
		pfs->name_next = (hammer2_key_t)-1;
		error = ENOENT;
	}

	/*
	 * Cleanup.  Unlock/drop order: chain before parent, then the
	 * inode lock taken in the setup section.
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_unlock(ip->pmp->iroot);
	} else {
		hammer2_inode_unlock(hmp->spmp->iroot);
	}

	return (error);
}
517 
/*
 * Find a specific PFS by name.
 *
 * On success fills in pfs->name_key / pfs_type / pfs_subtype /
 * pfs_clid / pfs_fsid from the PFS inode under the super-root.
 * Returns ENOENT when no PFS of that name exists.
 */
static int
hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t lhc;
	int error;
	size_t len;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	error = 0;

	hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
	parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);

	/* ensure 0-termination, then hash the name for the directory scan */
	pfs->name[sizeof(pfs->name) - 1] = 0;
	len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, len);

	/*
	 * Scan all entries sharing the name hash and compare actual
	 * directory entry names to resolve hash collisions.
	 */
	chain = hammer2_chain_lookup(&parent, &key_next,
					 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					 &error, HAMMER2_LOOKUP_SHARED);
	while (chain) {
		if (hammer2_chain_dirent_test(chain, pfs->name, len))
			break;
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		ripdata = NULL;

		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else if (error == 0) {
		/* scan ran off the hash range without a name match */
		error = ENOENT;
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(hmp->spmp->iroot);

	return (error);
}
589 
/*
 * Create a new PFS under the super-root.
 *
 * Creates a directory inode in the super-root, stamps it with the
 * caller-supplied PFS type/subtype/clid/fsid and default compression
 * and check algorithms, syncs it out (the super-root is not mounted so
 * it must be flushed explicitly), then associates it with a pmp via
 * hammer2_pfsalloc().  Returns EEXIST if the name is already in use.
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *nchain;
	hammer2_dev_t *hmp;
	hammer2_dev_t *force_local;
	hammer2_ioc_pfs_t *pfs;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	int error;

	hmp = ip->pmp->pfs_hmps[0];	/* XXX */
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	nip = NULL;

	if (pfs->name[0] == 0)
		return(EINVAL);
	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure 0-termination */

	/* refuse duplicate PFS names */
	if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
		return(EEXIST);

	hammer2_trans_init(hmp->spmp, 0);
	mtid = hammer2_trans_sub(hmp->spmp);
	nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
				   NULL, NULL,
				   pfs->name, strlen(pfs->name), 0,
				   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	if (error == 0) {
		nip->flags |= HAMMER2_INODE_NOSIDEQ;
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_chain_modify(nchain, mtid, 0, 0);
		KKASSERT(error == 0);
		nipdata = &nchain->data->ipdata;

		nip->meta.pfs_type = pfs->pfs_type;
		nip->meta.pfs_subtype = pfs->pfs_subtype;
		nip->meta.pfs_clid = pfs->pfs_clid;
		nip->meta.pfs_fsid = pfs->pfs_fsid;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

		/*
		 * Set default compression and check algorithm.  This
		 * can be changed later.
		 *
		 * Do not allow compression on PFS's with the special name
		 * "boot", the boot loader can't decompress (yet).
		 */
		nip->meta.comp_algo =
			HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
		nip->meta.check_algo =
			HAMMER2_ENC_ALGO( HAMMER2_CHECK_XXHASH64);

		if (strcasecmp(pfs->name, "boot") == 0) {
			nip->meta.comp_algo =
				HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
		}

		/*
		 * Super-root isn't mounted, fsync it
		 */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		hammer2_inode_chain_flush(nip);
		KKASSERT(nip->refs == 1);
		hammer2_inode_drop(nip);

		/*
		 * We still have a ref on the chain, relock and associate
		 * with an appropriate PFS.
		 */
		force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		nipdata = &nchain->data->ipdata;
		kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
		hammer2_pfsalloc(nchain, nipdata,
				 nchain->bref.modify_tid, force_local);

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);

	}
	hammer2_trans_done(hmp->spmp, 1);

	return (error);
}
688 
/*
 * Destroy an existing PFS under the super-root.
 *
 * First removes the PFS from the in-memory cluster it belongs to
 * (under hammer2_mntlk), then unlinks the PFS directory entry from the
 * device's super-root via an unlink XOP.
 */
static int
hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t	*hmp;
	hammer2_pfs_t	*spmp;
	hammer2_pfs_t	*pmp;
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *iroot;
	int error;
	int i;

	/*
	 * The PFS should be probed, so we should be able to
	 * locate it.  We only delete the PFS from the
	 * specific H2 block device (hmp), not all of
	 * them.  We must remove the PFS from the cluster
	 * before we can destroy it.
	 */
	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure termination */

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * Search every mounted pmp for a cluster element on this hmp
	 * whose name matches.  On exit, pmp/i identify the match.
	 */
	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			if (pmp->pfs_names[i] &&
			    strcmp(pmp->pfs_names[i], pfs->name) == 0) {
				break;
			}
		}
		if (i != HAMMER2_MAXCLUSTER)
			break;
	}

	if (pmp == NULL) {
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		return ENOENT;
	}

	/*
	 * Ok, we found the pmp and we have the index.  Permanently remove
	 * the PFS from the cluster
	 */
	iroot = pmp->iroot;
	kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
	hammer2_pfsdealloc(pmp, i, 1);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Now destroy the PFS under its device using the per-device
	 * super-root.
	 */
	spmp = hmp->spmp;
	dip = spmp->iroot;
	hammer2_trans_init(spmp, 0);
	hammer2_inode_lock(dip, 0);

	/* forced, permanent unlink of the PFS directory entry */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
	xop->isdir = 2;
	xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_inode_unlock(dip);

#if 0
        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, dip, &xop->head, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
                        hammer2_inode_unlink_finisher(ip, 0);
                        hammer2_inode_unlock(ip);
                }
        } else {
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }
#endif
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	hammer2_trans_done(spmp, 1);

	return (hammer2_error_to_errno(error));
}
785 
/*
 * Snapshot the PFS mounted at ip into a new PFS named pfs->name under
 * the super-root.  The filesystem is synced first so the snapshot
 * copies a fully flushed blockref set, then the new PFS inode is
 * created, stamped as a MASTER/SNAPSHOT, given fresh fsid/clid uuids,
 * and handed to hammer2_pfsalloc().  Serialized against bulkfree via
 * hmp->bulklk.
 */
static int
hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t	*hmp;
	hammer2_pfs_t	*pmp;
	hammer2_chain_t	*chain;
	hammer2_inode_t *nip;
	hammer2_tid_t	mtid;
	size_t name_len;
	hammer2_key_t lhc;
	struct vattr vat;
	int error;
#if 0
	uuid_t opfs_clid;
#endif

	if (pfs->name[0] == 0)
		return(EINVAL);
	if (pfs->name[sizeof(pfs->name)-1] != 0)
		return(EINVAL);

	/* operate on the PFS root inode, not the passed-in file */
	pmp = ip->pmp;
	ip = pmp->iroot;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	/* serialize against bulkfree */
	lockmgr(&hmp->bulklk, LK_EXCLUSIVE);

	/* flush everything so the blockref copy below is consistent */
	hammer2_vfs_sync(pmp->mp, MNT_WAIT);

	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
	mtid = hammer2_trans_sub(pmp);
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	ip->meta.pfs_lsnap_tid = mtid;	/* record last-snapshot tid */

	/* XXX cluster it! */
	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);

	name_len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, name_len);

	/*
	 * Get the clid
	 */
	ripdata = &chain->data->ipdata;
#if 0
	opfs_clid = ripdata->meta.pfs_clid;
#endif
	hmp = chain->hmp;

	/*
	 * Create the snapshot directory under the super-root
	 *
	 * Set PFS type, generate a unique filesystem id, and generate
	 * a cluster id.  Use the same clid when snapshotting a PFS root,
	 * which theoretically allows the snapshot to be used as part of
	 * the same cluster (perhaps as a cache).
	 *
	 * Copy the (flushed) blockref array.  Theoretically we could use
	 * chain_duplicate() but it becomes difficult to disentangle
	 * the shared core so for now just brute-force it.
	 */
	VATTR_NULL(&vat);
	vat.va_type = VDIR;
	vat.va_mode = 0755;
	hammer2_chain_unlock(chain);
	nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
				   &vat, proc0.p_ucred,
				   pfs->name, name_len, 0,
				   1, 0, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	/* re-resolve ripdata, chain was unlocked across the create */
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	ripdata = &chain->data->ipdata;

	if (nip) {
		hammer2_dev_t *force_local;
		hammer2_chain_t *nchain;
		hammer2_inode_data_t *wipdata;
		hammer2_key_t	starting_inum;

		nip->flags |= HAMMER2_INODE_NOSIDEQ;
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_chain_modify(nchain, mtid, 0, 0);
		KKASSERT(error == 0);
		wipdata = &nchain->data->ipdata;

		/* the snapshot continues inode numbering from the source */
		starting_inum = ip->pmp->inode_tid + 1;
		nip->meta.pfs_inum = starting_inum;
		nip->meta.pfs_type = HAMMER2_PFSTYPE_MASTER;
		nip->meta.pfs_subtype = HAMMER2_PFSSUBTYPE_SNAPSHOT;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;
		nchain->bref.embed.stats = chain->bref.embed.stats;

		kern_uuidgen(&nip->meta.pfs_fsid, 1);

#if 0
		/*
		 * Give the snapshot its own private cluster id.  As a
		 * snapshot no further synchronization with the original
		 * cluster will be done.
		 */
		if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
			nip->meta.pfs_clid = opfs_clid;
		else
			kern_uuidgen(&nip->meta.pfs_clid, 1);
#endif
		kern_uuidgen(&nip->meta.pfs_clid, 1);
		nchain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;

		/* XXX hack blockset copy */
		/* XXX doesn't work with real cluster */
		wipdata->meta = nip->meta;
		wipdata->u.blockset = ripdata->u.blockset;

		KKASSERT(wipdata == &nchain->data->ipdata);

		/*
		 * Super-root isn't mounted; sync and flush the new inode
		 * explicitly, then drop our temporary inode ref.
		 */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		hammer2_inode_chain_flush(nip);
		KKASSERT(nip->refs == 1);
		hammer2_inode_drop(nip);

		force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		wipdata = &nchain->data->ipdata;
		kprintf("SNAPSHOT LOCAL PFS (IOCTL): %s\n", wipdata->filename);
		hammer2_pfsalloc(nchain, wipdata, nchain->bref.modify_tid,
				 force_local);
		nchain->pmp->inode_tid = starting_inum;

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);
	}

	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	hammer2_inode_unlock(ip);
	hammer2_trans_done(pmp, 1);

	lockmgr(&hmp->bulklk, LK_RELEASE);

	return (hammer2_error_to_errno(error));
}
939 
940 /*
941  * Retrieve the raw inode structure, non-inclusive of node-specific data.
942  */
943 static int
944 hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
945 {
946 	hammer2_ioc_inode_t *ino;
947 	hammer2_chain_t *chain;
948 	int error;
949 	int i;
950 
951 	ino = data;
952 	error = 0;
953 
954 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
955 	ino->data_count = 0;
956 	ino->inode_count = 0;
957 	for (i = 0; i < ip->cluster.nchains; ++i) {
958 		if ((chain = ip->cluster.array[i].chain) != NULL) {
959 			if (ino->data_count <
960 			    chain->bref.embed.stats.data_count) {
961 				ino->data_count =
962 					chain->bref.embed.stats.data_count;
963 			}
964 			if (ino->inode_count <
965 			    chain->bref.embed.stats.inode_count) {
966 				ino->inode_count =
967 					chain->bref.embed.stats.inode_count;
968 			}
969 		}
970 	}
971 	bzero(&ino->ip_data, sizeof(ino->ip_data));
972 	ino->ip_data.meta = ip->meta;
973 	ino->kdata = ip;
974 	hammer2_inode_unlock(ip);
975 
976 	return hammer2_error_to_errno(error);
977 }
978 
979 /*
980  * Set various parameters in an inode which cannot be set through
981  * normal filesystem VNOPS.
982  */
983 static int
984 hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
985 {
986 	hammer2_ioc_inode_t *ino = data;
987 	int error = 0;
988 
989 	hammer2_trans_init(ip->pmp, 0);
990 	hammer2_inode_lock(ip, 0);
991 
992 	if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
993 	    ip->meta.check_algo != ino->ip_data.meta.check_algo) {
994 		hammer2_inode_modify(ip);
995 		ip->meta.check_algo = ino->ip_data.meta.check_algo;
996 	}
997 	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
998 	    ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
999 		hammer2_inode_modify(ip);
1000 		ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
1001 	}
1002 	ino->kdata = ip;
1003 
1004 	/* Ignore these flags for now...*/
1005 	if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
1006 	    ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
1007 		hammer2_inode_modify(ip);
1008 		ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
1009 	}
1010 	if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
1011 	    ip->meta.data_quota != ino->ip_data.meta.data_quota) {
1012 		hammer2_inode_modify(ip);
1013 		ip->meta.data_quota = ino->ip_data.meta.data_quota;
1014 	}
1015 	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
1016 	    ip->meta.ncopies != ino->ip_data.meta.ncopies) {
1017 		hammer2_inode_modify(ip);
1018 		ip->meta.ncopies = ino->ip_data.meta.ncopies;
1019 	}
1020 	hammer2_inode_unlock(ip);
1021 	hammer2_trans_done(ip->pmp, 1);
1022 
1023 	return (hammer2_error_to_errno(error));
1024 }
1025 
1026 static
1027 int
1028 hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags)
1029 {
1030 	hammer2_chain_t *chain;
1031 	int count = 100000;
1032 	int i;
1033 
1034 	for (i = 0; i < ip->cluster.nchains; ++i) {
1035 		chain = ip->cluster.array[i].chain;
1036 		if (chain == NULL)
1037 			continue;
1038 		hammer2_dump_chain(chain, 0, &count, 'i', flags);
1039 	}
1040 	return 0;
1041 }
1042 
1043 /*
1044  * Executes one flush/free pass per call.  If trying to recover
1045  * data we just freed up a moment ago it can take up to six passes
1046  * to fully free the blocks.  Note that passes occur automatically based
1047  * on free space as the storage fills up, but manual passes may be needed
1048  * if storage becomes almost completely full.
1049  */
1050 static
1051 int
1052 hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
1053 {
1054 	hammer2_ioc_bulkfree_t *bfi = data;
1055 	hammer2_dev_t	*hmp;
1056 	hammer2_pfs_t	*pmp;
1057 	hammer2_chain_t *vchain;
1058 	int error;
1059 	int didsnap;
1060 
1061 	pmp = ip->pmp;
1062 	ip = pmp->iroot;
1063 
1064 	hmp = pmp->pfs_hmps[0];
1065 	if (hmp == NULL)
1066 		return (EINVAL);
1067 	if (bfi == NULL)
1068 		return (EINVAL);
1069 
1070 	/*
1071 	 * Bulkfree has to be serialized to guarantee at least one sync
1072 	 * inbetween bulkfrees.
1073 	 */
1074 	error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
1075 	if (error)
1076 		return error;
1077 
1078 	/*
1079 	 * sync the filesystem and obtain a snapshot of the synchronized
1080 	 * hmp volume header.  We treat the snapshot as an independent
1081 	 * entity.
1082 	 *
1083 	 * If ENOSPC occurs we should continue, because bulkfree is the only
1084 	 * way to fix that.  The flush will have flushed everything it could
1085 	 * and not left any modified chains.  Otherwise an error is fatal.
1086 	 */
1087 	error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
1088 	if (error && error != ENOSPC)
1089 		goto failed;
1090 
1091 	/*
1092 	 * If we have an ENOSPC error we have to bulkfree on the live
1093 	 * topology.  Otherwise we can bulkfree on a snapshot.
1094 	 */
1095 	if (error) {
1096 		kprintf("hammer2: WARNING! Bulkfree forced to use live "
1097 			"topology\n");
1098 		vchain = &hmp->vchain;
1099 		hammer2_chain_ref(vchain);
1100 		didsnap = 0;
1101 	} else {
1102 		vchain = hammer2_chain_bulksnap(hmp);
1103 		didsnap = 1;
1104 	}
1105 
1106 	/*
1107 	 * Bulkfree on a snapshot does not need a transaction, which allows
1108 	 * it to run concurrently with any operation other than another
1109 	 * bulkfree.
1110 	 *
1111 	 * If we are running bulkfree on the live topology we have to be
1112 	 * in a FLUSH transaction.
1113 	 */
1114 	if (didsnap == 0)
1115 		hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
1116 
1117 	if (bfi) {
1118 		hammer2_thr_freeze(&hmp->bfthr);
1119 		error = hammer2_bulkfree_pass(hmp, vchain, bfi);
1120 		hammer2_thr_unfreeze(&hmp->bfthr);
1121 	}
1122 	if (didsnap) {
1123 		hammer2_chain_bulkdrop(vchain);
1124 	} else {
1125 		hammer2_chain_drop(vchain);
1126 		hammer2_trans_done(pmp, 1);
1127 	}
1128 	error = hammer2_error_to_errno(error);
1129 
1130 failed:
1131 	lockmgr(&hmp->bflock, LK_RELEASE);
1132 	return error;
1133 }
1134 
1135 /*
1136  * Unconditionally delete meta-data in a hammer2 filesystem
1137  */
1138 static
1139 int
1140 hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
1141 {
1142 	hammer2_ioc_destroy_t *iocd = data;
1143 	hammer2_pfs_t *pmp = ip->pmp;
1144 	int error;
1145 
1146 	if (pmp->ronly) {
1147 		error = EROFS;
1148 		return error;
1149 	}
1150 
1151 	switch(iocd->cmd) {
1152 	case HAMMER2_DELETE_FILE:
1153 		/*
1154 		 * Destroy a bad directory entry by name.  Caller must
1155 		 * pass the directory as fd.
1156 		 */
1157 		{
1158 		hammer2_xop_unlink_t *xop;
1159 
1160 		if (iocd->path[sizeof(iocd->path)-1]) {
1161 			error = EINVAL;
1162 			break;
1163 		}
1164 		if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
1165 			error = EINVAL;
1166 			break;
1167 		}
1168 		hammer2_pfs_memory_wait(ip, 0);
1169 		hammer2_trans_init(pmp, 0);
1170 		hammer2_inode_lock(ip, 0);
1171 
1172 		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
1173 		hammer2_xop_setname(&xop->head, iocd->path, strlen(iocd->path));
1174 		xop->isdir = -1;
1175 		xop->dopermanent = H2DOPERM_PERMANENT |
1176 				   H2DOPERM_FORCE |
1177 				   H2DOPERM_IGNINO;
1178 		hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1179 
1180 		error = hammer2_xop_collect(&xop->head, 0);
1181 		error = hammer2_error_to_errno(error);
1182 		hammer2_inode_unlock(ip);
1183 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1184 		hammer2_trans_done(pmp, 1);
1185 		}
1186 		break;
1187 	case HAMMER2_DELETE_INUM:
1188 		/*
1189 		 * Destroy a bad inode by inode number.
1190 		 */
1191 		{
1192 		hammer2_xop_lookup_t *xop;
1193 
1194 		if (iocd->inum < 1) {
1195 			error = EINVAL;
1196 			break;
1197 		}
1198 		hammer2_pfs_memory_wait(ip, 0);
1199 		hammer2_trans_init(pmp, 0);
1200 
1201 		xop = hammer2_xop_alloc(pmp->iroot, 0);
1202 		xop->lhc = iocd->inum;
1203 		hammer2_xop_start(&xop->head, hammer2_xop_lookup);
1204 		error = hammer2_xop_collect(&xop->head, 0);
1205 		if (error == 0) {
1206 			ip = hammer2_inode_get(pmp, NULL, &xop->head, -1);
1207 			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1208 			if (ip) {
1209 				ip->meta.nlinks = 1;
1210 				hammer2_inode_unlink_finisher(ip, 0);
1211 				hammer2_inode_unlock(ip);
1212 			}
1213 		} else {
1214 			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1215 		}
1216 		}
1217 		break;
1218 	default:
1219 		error = EINVAL;
1220 		break;
1221 	}
1222 	return error;
1223 }
1224