1 /*
2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
35 */
36
37 #include "hammer.h"
38
39 static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
40 struct hammer_ioc_history *hist);
41 static int hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
42 struct hammer_ioc_synctid *std);
43 static int hammer_ioc_get_version(hammer_transaction_t trans,
44 hammer_inode_t ip,
45 struct hammer_ioc_version *ver);
46 static int hammer_ioc_set_version(hammer_transaction_t trans,
47 hammer_inode_t ip,
48 struct hammer_ioc_version *ver);
49 static int hammer_ioc_get_info(hammer_transaction_t trans,
50 struct hammer_ioc_info *info);
51 static int hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
52 struct hammer_ioc_snapshot *snap);
53 static int hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
54 struct hammer_ioc_snapshot *snap);
55 static int hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
56 struct hammer_ioc_snapshot *snap);
57 static int hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
58 struct hammer_ioc_config *snap);
59 static int hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
60 struct hammer_ioc_config *snap);
61 static int hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
62 struct hammer_ioc_data *data);
63
/*
 * Dispatch a HAMMER-specific ioctl command.
 *
 * A single HAMMER transaction brackets all commands.  The
 * SYSCAP_NOVFS_IOCTL capability check is performed once up front and its
 * result latched into 'error'; commands that do not require the
 * capability simply overwrite 'error' with their own result, while
 * privileged commands first test 'error == 0'.  Commands that modify the
 * filesystem additionally fail with EROFS on a read-only mount.
 *
 * Returns 0 on success or an errno.  Note that several handlers report
 * per-operation status via the ioctl structure's head.error field and
 * return 0 here even on partial failure.
 */
int
hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
	     struct ucred *cred)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp;
	int error;

	/*
	 * Latch the capability check result; consulted per-command below.
	 */
	error = caps_priv_check(cred, SYSCAP_NOVFS_IOCTL);
	hmp = ip->hmp;

	hammer_start_transaction(&trans, hmp);

	switch(com) {
	case HAMMERIOC_PRUNE:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_prune(&trans, ip,
					(struct hammer_ioc_prune *)data);
		}
		break;
	case HAMMERIOC_GETHISTORY:
		/* read-only query; allowed regardless of the caps check */
		error = hammer_ioc_gethistory(&trans, ip,
					(struct hammer_ioc_history *)data);
		break;
	case HAMMERIOC_REBLOCK:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_reblock(&trans, ip,
					(struct hammer_ioc_reblock *)data);
		}
		break;
	case HAMMERIOC_REBALANCE:
		/*
		 * Rebalancing needs to lock a lot of B-Tree nodes.  The
		 * children and children's children.  Systems with very
		 * little memory will not be able to do it.
		 */
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0 && nbuf < HAMMER_REBALANCE_MIN_BUFS) {
			hkprintf("System has insufficient buffers "
				"to rebalance the tree.  nbuf < %d\n",
				HAMMER_REBALANCE_MIN_BUFS);
			error = ENOSPC;
		}
		if (error == 0) {
			error = hammer_ioc_rebalance(&trans, ip,
					(struct hammer_ioc_rebalance *)data);
		}
		break;
	case HAMMERIOC_SYNCTID:
		error = hammer_ioc_synctid(&trans, ip,
					(struct hammer_ioc_synctid *)data);
		break;
	case HAMMERIOC_GET_PSEUDOFS:
		error = hammer_ioc_get_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		break;
	case HAMMERIOC_SET_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_set_pseudofs(&trans, ip, cred,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_UPG_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_upgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_DGD_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_downgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_RMR_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_destroy_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_WAI_PSEUDOFS:
		/* waiting is harmless but still requires the capability */
		if (error == 0) {
			error = hammer_ioc_wait_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_READ:
		if (error == 0) {
			error = hammer_ioc_mirror_read(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_WRITE:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_mirror_write(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_GET_VERSION:
		error = hammer_ioc_get_version(&trans, ip,
				    (struct hammer_ioc_version *)data);
		break;
	case HAMMERIOC_GET_INFO:
		error = hammer_ioc_get_info(&trans,
				    (struct hammer_ioc_info *)data);
		break;
	case HAMMERIOC_SET_VERSION:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_set_version(&trans, ip,
					    (struct hammer_ioc_version *)data);
		}
		break;
	case HAMMERIOC_ADD_VOLUME:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			/*
			 * Re-check the capability (redundant with the check
			 * at the top) before changing volume topology.
			 */
			error = caps_priv_check(cred, SYSCAP_NOVFS_IOCTL);
			if (error == 0)
				error = hammer_ioc_volume_add(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_DEL_VOLUME:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			/* see HAMMERIOC_ADD_VOLUME re: redundant re-check */
			error = caps_priv_check(cred, SYSCAP_NOVFS_IOCTL);
			if (error == 0)
				error = hammer_ioc_volume_del(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_LIST_VOLUMES:
		error = hammer_ioc_volume_list(&trans, ip,
		    (struct hammer_ioc_volume_list *)data);
		break;
	case HAMMERIOC_ADD_SNAPSHOT:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_add_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_DEL_SNAPSHOT:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_del_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_GET_SNAPSHOT:
		error = hammer_ioc_get_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		break;
	case HAMMERIOC_GET_CONFIG:
		error = hammer_ioc_get_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		break;
	case HAMMERIOC_SET_CONFIG:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_set_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		}
		break;
	case HAMMERIOC_DEDUP:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_dedup(
					&trans, ip, (struct hammer_ioc_dedup *)data);
		}
		break;
	case HAMMERIOC_GET_DATA:
		if (error == 0) {
			error = hammer_ioc_get_data(
					&trans, ip, (struct hammer_ioc_data *)data);
		}
		break;
	case HAMMERIOC_SCAN_PSEUDOFS:
		error = hammer_ioc_scan_pseudofs(
				&trans, ip, (struct hammer_ioc_pseudofs_rw *)data);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	hammer_done_transaction(&trans);
	return (error);
}
274
275 /*
276 * Iterate through an object's inode or an object's records and record
277 * modification TIDs.
278 */
279 static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
280 hammer_btree_elm_t elm);
281
282 static
283 int
hammer_ioc_gethistory(hammer_transaction_t trans,hammer_inode_t ip,struct hammer_ioc_history * hist)284 hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
285 struct hammer_ioc_history *hist)
286 {
287 struct hammer_cursor cursor;
288 hammer_btree_elm_t elm;
289 int error;
290
291 /*
292 * Validate the structure and initialize for return.
293 */
294 if (hist->beg_tid > hist->end_tid)
295 return(EINVAL);
296 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
297 if (hist->key > hist->nxt_key)
298 return(EINVAL);
299 }
300
301 hist->obj_id = ip->obj_id;
302 hist->count = 0;
303 hist->nxt_tid = hist->end_tid;
304 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
305 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
306 hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
307 hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
308 if ((ip->flags & HAMMER_INODE_MODMASK) &
309 ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
310 hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
311 }
312
313 /*
314 * Setup the cursor. We can't handle undeletable records
315 * (create_tid of 0) at the moment. A create_tid of 0 has
316 * a special meaning and cannot be specified in the cursor.
317 */
318 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
319 if (error) {
320 hammer_done_cursor(&cursor);
321 return(error);
322 }
323
324 cursor.key_beg.obj_id = hist->obj_id;
325 cursor.key_beg.create_tid = hist->beg_tid;
326 cursor.key_beg.delete_tid = 0;
327 cursor.key_beg.obj_type = 0;
328 if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
329 cursor.key_beg.create_tid = 1;
330
331 cursor.key_end.obj_id = hist->obj_id;
332 cursor.key_end.create_tid = hist->end_tid;
333 cursor.key_end.delete_tid = 0;
334 cursor.key_end.obj_type = 0;
335
336 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;
337
338 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
339 /*
340 * key-range within the file. For a regular file the
341 * on-disk key represents BASE+LEN, not BASE, so the
342 * first possible record containing the offset 'key'
343 * has an on-disk key of (key + 1).
344 */
345 cursor.key_beg.key = hist->key;
346 cursor.key_end.key = HAMMER_MAX_KEY;
347 cursor.key_beg.localization = ip->obj_localization |
348 HAMMER_LOCALIZE_MISC;
349 cursor.key_end.localization = ip->obj_localization |
350 HAMMER_LOCALIZE_MISC;
351
352 switch(ip->ino_data.obj_type) {
353 case HAMMER_OBJTYPE_REGFILE:
354 ++cursor.key_beg.key;
355 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
356 break;
357 case HAMMER_OBJTYPE_DIRECTORY:
358 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
359 cursor.key_beg.localization = ip->obj_localization |
360 hammer_dir_localization(ip);
361 cursor.key_end.localization = ip->obj_localization |
362 hammer_dir_localization(ip);
363 break;
364 case HAMMER_OBJTYPE_DBFILE:
365 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
366 break;
367 default:
368 error = EINVAL;
369 break;
370 }
371 cursor.key_end.rec_type = cursor.key_beg.rec_type;
372 } else {
373 /*
374 * The inode itself.
375 */
376 cursor.key_beg.key = 0;
377 cursor.key_end.key = 0;
378 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
379 cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
380 cursor.key_beg.localization = ip->obj_localization |
381 HAMMER_LOCALIZE_INODE;
382 cursor.key_end.localization = ip->obj_localization |
383 HAMMER_LOCALIZE_INODE;
384 }
385
386 error = hammer_btree_first(&cursor);
387 while (error == 0) {
388 elm = &cursor.node->ondisk->elms[cursor.index];
389
390 add_history(ip, hist, elm);
391 if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
392 HAMMER_IOC_HISTORY_NEXT_KEY |
393 HAMMER_IOC_HISTORY_EOF)) {
394 break;
395 }
396 error = hammer_btree_iterate(&cursor);
397 }
398 if (error == ENOENT) {
399 hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
400 error = 0;
401 }
402 hammer_done_cursor(&cursor);
403 return(error);
404 }
405
406 /*
407 * Add the scanned element to the ioctl return structure. Some special
 * casing is required for regular files to accommodate how data ranges are
409 * stored on-disk.
410 */
static void
add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
	    hammer_btree_elm_t elm)
{
	int i;

	/* only leaf RECORD elements carry history; skip internal elements */
	if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
		return;
	if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
	    ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE) {
		/*
		 * Adjust nxt_key.  For a regular file the on-disk key is
		 * BASE+LEN, so (key - data_len) is the record's base offset.
		 * Pull nxt_key down to the nearest record boundary beyond
		 * the requested key.
		 */
		if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
		    hist->key < elm->leaf.base.key - elm->leaf.data_len) {
			hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
		}
		if (hist->nxt_key > elm->leaf.base.key)
			hist->nxt_key = elm->leaf.base.key;

		/*
		 * Record is beyond MAXPHYS, there won't be any more records
		 * in the iteration covering the requested offset (key).
		 */
		if (elm->leaf.base.key >= MAXPHYS &&
		    elm->leaf.base.key - MAXPHYS > hist->key) {
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
		}

		/*
		 * Data-range of record does not cover the key.
		 */
		if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
			return;

	} else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * Adjust nxt_key.  Non-REGFILE records (directory entries,
		 * DB records) use the key directly rather than BASE+LEN.
		 */
		if (hist->nxt_key > elm->leaf.base.key &&
		    hist->key < elm->leaf.base.key) {
			hist->nxt_key = elm->leaf.base.key;
		}

		/*
		 * Record is beyond the requested key.
		 */
		if (elm->leaf.base.key > hist->key)
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
	}

	/*
	 * Add create_tid if it is in-bounds.  The duplicate check against
	 * the previously stored tid suppresses adjacent identical entries.
	 * When the array is full, record the resume point in nxt_tid and
	 * flag NEXT_TID so the caller can continue in a later ioctl.
	 */
	i = hist->count;
	if ((i == 0 ||
	     elm->leaf.base.create_tid != hist->hist_ary[i - 1].tid) &&
	    elm->leaf.base.create_tid >= hist->beg_tid &&
	    elm->leaf.base.create_tid < hist->end_tid) {
		if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
			hist->nxt_tid = elm->leaf.base.create_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.create_tid;
		hist->hist_ary[i].time32 = elm->leaf.create_ts;
		++hist->count;
	}

	/*
	 * Add delete_tid if it is in-bounds.  Note that different portions
	 * of the history may have overlapping data ranges with different
	 * delete_tid's.  If this case occurs the delete_tid may match the
	 * create_tid of a following record.  XXX
	 *
	 *	[        ]
	 *            [     ]
	 */
	i = hist->count;
	if (elm->leaf.base.delete_tid &&
	    elm->leaf.base.delete_tid >= hist->beg_tid &&
	    elm->leaf.base.delete_tid < hist->end_tid) {
		if (i == HAMMER_MAX_HISTORY_ELMS) {
			hist->nxt_tid = elm->leaf.base.delete_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.delete_tid;
		hist->hist_ary[i].time32 = elm->leaf.delete_ts;
		++hist->count;
	}
}
503
504 /*
505 * Acquire synchronization TID
506 */
507 static
508 int
hammer_ioc_synctid(hammer_transaction_t trans,hammer_inode_t ip,struct hammer_ioc_synctid * std)509 hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
510 struct hammer_ioc_synctid *std)
511 {
512 hammer_mount_t hmp = ip->hmp;
513 int error = 0;
514
515 switch(std->op) {
516 case HAMMER_SYNCTID_NONE:
517 std->tid = hmp->flusher.tid; /* inaccurate */
518 break;
519 case HAMMER_SYNCTID_ASYNC:
520 hammer_queue_inodes_flusher(hmp, MNT_NOWAIT);
521 hammer_flusher_async(hmp, NULL);
522 std->tid = hmp->flusher.tid; /* inaccurate */
523 break;
524 case HAMMER_SYNCTID_SYNC1:
525 hammer_queue_inodes_flusher(hmp, MNT_WAIT);
526 hammer_flusher_sync(hmp);
527 std->tid = hmp->flusher.tid;
528 break;
529 case HAMMER_SYNCTID_SYNC2:
530 hammer_queue_inodes_flusher(hmp, MNT_WAIT);
531 hammer_flusher_sync(hmp);
532 std->tid = hmp->flusher.tid;
533 hammer_flusher_sync(hmp);
534 break;
535 default:
536 error = EOPNOTSUPP;
537 break;
538 }
539 return(error);
540 }
541
542 /*
543 * Retrieve version info.
544 *
 * Load min_version, wip_version, and max_version.  If cur_version is passed
546 * as 0 then load the current version into cur_version. Load the description
547 * for cur_version into the description array.
548 *
549 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
550 * unsupported value.
551 */
552 static
553 int
hammer_ioc_get_version(hammer_transaction_t trans,hammer_inode_t ip,struct hammer_ioc_version * ver)554 hammer_ioc_get_version(hammer_transaction_t trans, hammer_inode_t ip,
555 struct hammer_ioc_version *ver)
556 {
557 int error = 0;
558
559 ver->min_version = HAMMER_VOL_VERSION_MIN;
560 ver->wip_version = HAMMER_VOL_VERSION_WIP;
561 ver->max_version = HAMMER_VOL_VERSION_MAX;
562 if (ver->cur_version == 0)
563 ver->cur_version = trans->hmp->version;
564 switch(ver->cur_version) {
565 case 1:
566 ksnprintf(ver->description, sizeof(ver->description),
567 "First HAMMER release (DragonFly 2.0+)");
568 break;
569 case 2:
570 ksnprintf(ver->description, sizeof(ver->description),
571 "New directory entry layout (DragonFly 2.3+)");
572 break;
573 case 3:
574 ksnprintf(ver->description, sizeof(ver->description),
575 "New snapshot management (DragonFly 2.5+)");
576 break;
577 case 4:
578 ksnprintf(ver->description, sizeof(ver->description),
579 "New undo/flush, faster flush/sync (DragonFly 2.5+)");
580 break;
581 case 5:
582 ksnprintf(ver->description, sizeof(ver->description),
583 "Adjustments for dedup support (DragonFly 2.9+)");
584 break;
585 case 6:
586 ksnprintf(ver->description, sizeof(ver->description),
587 "Directory Hash ALG1 (tmp/rename resistance)");
588 break;
589 case 7:
590 ksnprintf(ver->description, sizeof(ver->description),
591 "Use ISCSI CRC (faster than original crc32)");
592 break;
593 default:
594 ksnprintf(ver->description, sizeof(ver->description),
595 "Unknown");
596 error = EINVAL;
597 break;
598 }
599 return(error);
600 };
601
602 /*
603 * Set version info
604 */
605 static
606 int
hammer_ioc_set_version(hammer_transaction_t trans,hammer_inode_t ip,struct hammer_ioc_version * ver)607 hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip,
608 struct hammer_ioc_version *ver)
609 {
610 hammer_mount_t hmp = trans->hmp;
611 struct hammer_cursor cursor;
612 hammer_volume_t volume;
613 int error;
614 int over = hmp->version;
615
616 /*
617 * Generally do not allow downgrades. However, version 4 can
618 * be downgraded to version 3.
619 */
620 if (ver->cur_version < hmp->version) {
621 if (!(ver->cur_version == 3 && hmp->version == 4))
622 return(EINVAL);
623 }
624 if (ver->cur_version == hmp->version)
625 return(0);
626 if (ver->cur_version > HAMMER_VOL_VERSION_MAX)
627 return(EINVAL);
628 if (hmp->ronly)
629 return(EROFS);
630
631 /*
632 * Update the root volume header and the version cached in
633 * the hammer_mount structure.
634 */
635 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
636 if (error)
637 goto failed;
638 hammer_lock_ex(&hmp->flusher.finalize_lock);
639 hammer_sync_lock_ex(trans);
640 hmp->version = ver->cur_version;
641
642 /*
643 * If upgrading from version < 4 to version >= 4 the UNDO FIFO
644 * must be reinitialized.
645 */
646 if (over < HAMMER_VOL_VERSION_FOUR &&
647 ver->cur_version >= HAMMER_VOL_VERSION_FOUR) {
648 hkprintf("upgrade undo to version 4\n");
649 error = hammer_upgrade_undo_4(trans);
650 if (error)
651 goto failed;
652 }
653
654 /*
655 * Adjust the version in the volume header
656 */
657 volume = hammer_get_root_volume(hmp, &error);
658 KKASSERT(error == 0);
659 hammer_modify_volume_field(cursor.trans, volume, vol_version);
660 volume->ondisk->vol_version = ver->cur_version;
661 hammer_modify_volume_done(volume);
662 hammer_rel_volume(volume, 0);
663
664 hammer_sync_unlock(trans);
665 hammer_unlock(&hmp->flusher.finalize_lock);
666 failed:
667 ver->head.error = error;
668 hammer_done_cursor(&cursor);
669 return(0);
670 }
671
672 /*
673 * Get information
674 */
675 static
676 int
hammer_ioc_get_info(hammer_transaction_t trans,struct hammer_ioc_info * info)677 hammer_ioc_get_info(hammer_transaction_t trans, struct hammer_ioc_info *info)
678 {
679 hammer_volume_ondisk_t ondisk = trans->hmp->rootvol->ondisk;
680 hammer_mount_t hmp = trans->hmp;
681
682 /* Fill the structure with the necessary information */
683 _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks);
684 info->rsvbigblocks = info->rsvbigblocks >> HAMMER_BIGBLOCK_BITS;
685 strlcpy(info->vol_label, ondisk->vol_label, sizeof(ondisk->vol_label));
686
687 info->vol_fsid = hmp->fsid;
688 info->vol_fstype = ondisk->vol_fstype;
689 info->version = hmp->version;
690
691 info->inodes = ondisk->vol0_stat_inodes;
692 info->bigblocks = ondisk->vol0_stat_bigblocks;
693 info->freebigblocks = ondisk->vol0_stat_freebigblocks;
694 info->nvolumes = hmp->nvolumes;
695 info->rootvol = ondisk->vol_rootvol;
696
697 return 0;
698 }
699
700 /*
701 * Add a snapshot transaction id(s) to the list of snapshots.
702 *
703 * NOTE: Records are created with an allocated TID. If a flush cycle
704 * is in progress the record may be synced in the current flush
705 * cycle and the volume header will reflect the allocation of the
706 * TID, but the synchronization point may not catch up to the
707 * TID until the next flush cycle.
708 */
static
int
hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
	      struct hammer_ioc_snapshot *snap)
{
	hammer_mount_t hmp = ip->hmp;
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);
	if (snap->index >= snap->count)
		return (EINVAL);

	hammer_lock_ex(&hmp->snapshot_lock);
again:
	/*
	 * (Re)initialize the cursor.  We land here again after an EDEADLK
	 * so the whole lookup/create sequence restarts with a fresh cursor;
	 * snap->index preserves progress across retries.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/* template leaf for the SNAPSHOT meta-records being inserted */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	leaf.data_len = sizeof(struct hammer_snapshot_data);

	while (snap->index < snap->count) {
		/* the snapshot TID is the record key */
		leaf.base.key = (int64_t)snap->snaps[snap->index].tid;
		cursor.key_beg = leaf.base;
		error = hammer_btree_lookup(&cursor);
		if (error == 0) {
			/* a snapshot record for this TID already exists */
			error = EEXIST;
			break;
		}

		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&snap->snaps[snap->index],
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
		cursor.flags |= HAMMER_CURSOR_ASOF;
		if (error)
			break;
		++snap->index;
	}
	/* per-operation status goes in head.error; ioctl itself succeeds */
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	hammer_unlock(&hmp->snapshot_lock);
	return(0);
}
783
784 /*
785 * Delete snapshot transaction id(s) from the list of snapshots.
786 */
static
int
hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
	      struct hammer_ioc_snapshot *snap)
{
	hammer_mount_t hmp = ip->hmp;
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);
	if (snap->index >= snap->count)
		return (EINVAL);

	hammer_lock_ex(&hmp->snapshot_lock);
again:
	/*
	 * (Re)initialize the cursor.  We land here again after an EDEADLK
	 * so the lookup/delete sequence restarts with a fresh cursor;
	 * snap->index preserves progress across retries.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/* SNAPSHOT meta-records live under the root object id */
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	while (snap->index < snap->count) {
		/* exact lookup of each requested snapshot TID */
		cursor.key_beg.key = (int64_t)snap->snaps[snap->index].tid;
		error = hammer_btree_lookup(&cursor);
		if (error)
			break;
		error = hammer_btree_extract_leaf(&cursor);
		if (error)
			break;
		error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
						0, 0, 0, NULL);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
		if (error)
			break;
		++snap->index;
	}
	/* per-operation status goes in head.error; ioctl itself succeeds */
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	hammer_unlock(&hmp->snapshot_lock);
	return(0);
}
848
849 /*
850 * Retrieve as many snapshot ids as possible or until the array is
851 * full, starting after the last transaction id passed in. If count
852 * is 0 we retrieve starting at the beginning.
853 *
854 * NOTE: Because the b-tree key field is signed but transaction ids
855 * are unsigned the returned list will be signed-sorted instead
856 * of unsigned sorted. The Caller must still sort the aggregate
857 * results.
858 */
859 static
860 int
hammer_ioc_get_snapshot(hammer_transaction_t trans,hammer_inode_t ip,struct hammer_ioc_snapshot * snap)861 hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
862 struct hammer_ioc_snapshot *snap)
863 {
864 struct hammer_cursor cursor;
865 int error;
866
867 /*
868 * Validate structure
869 */
870 if (snap->index != 0)
871 return (EINVAL);
872 if (snap->count > HAMMER_SNAPS_PER_IOCTL)
873 return (EINVAL);
874
875 /*
876 * Look for keys starting after the previous iteration, or at
877 * the beginning if snap->count is 0.
878 */
879 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
880 if (error) {
881 hammer_done_cursor(&cursor);
882 return(error);
883 }
884
885 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
886 cursor.key_beg.create_tid = 0;
887 cursor.key_beg.delete_tid = 0;
888 cursor.key_beg.obj_type = 0;
889 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
890 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
891 if (snap->count == 0)
892 cursor.key_beg.key = HAMMER_MIN_KEY;
893 else
894 cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1;
895
896 cursor.key_end = cursor.key_beg;
897 cursor.key_end.key = HAMMER_MAX_KEY;
898 cursor.asof = HAMMER_MAX_TID;
899 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF;
900
901 snap->count = 0;
902
903 error = hammer_btree_first(&cursor);
904 while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) {
905 error = hammer_btree_extract_leaf(&cursor);
906 if (error)
907 break;
908 if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) {
909 error = hammer_btree_extract_data(&cursor);
910 snap->snaps[snap->count] = cursor.data->snap;
911
912 /*
913 * The snap data tid should match the key but might
914 * not due to a bug in the HAMMER v3 conversion code.
915 *
916 * This error will work itself out over time but we
917 * have to force a match or the snapshot will not
918 * be deletable.
919 */
920 if (cursor.data->snap.tid !=
921 (hammer_tid_t)cursor.leaf->base.key) {
922 hkprintf("lo=%08x snapshot key "
923 "0x%016jx data mismatch 0x%016jx\n",
924 cursor.key_beg.localization,
925 (uintmax_t)cursor.data->snap.tid,
926 cursor.leaf->base.key);
927 hkprintf("Probably left over from the "
928 "original v3 conversion, hammer "
929 "cleanup should get it eventually\n");
930 snap->snaps[snap->count].tid =
931 cursor.leaf->base.key;
932 }
933 ++snap->count;
934 }
935 error = hammer_btree_iterate(&cursor);
936 }
937
938 if (error == ENOENT) {
939 snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF;
940 error = 0;
941 }
942 snap->head.error = error;
943 hammer_done_cursor(&cursor);
944 return(0);
945 }
946
947 /*
948 * Retrieve the PFS hammer cleanup utility config record. This is
949 * different (newer than) the PFS config.
950 */
951 static
952 int
hammer_ioc_get_config(hammer_transaction_t trans,hammer_inode_t ip,struct hammer_ioc_config * config)953 hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
954 struct hammer_ioc_config *config)
955 {
956 struct hammer_cursor cursor;
957 int error;
958
959 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
960 if (error) {
961 hammer_done_cursor(&cursor);
962 return(error);
963 }
964
965 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
966 cursor.key_beg.create_tid = 0;
967 cursor.key_beg.delete_tid = 0;
968 cursor.key_beg.obj_type = 0;
969 cursor.key_beg.rec_type = HAMMER_RECTYPE_CONFIG;
970 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
971 cursor.key_beg.key = 0; /* config space page 0 */
972
973 cursor.asof = HAMMER_MAX_TID;
974 cursor.flags |= HAMMER_CURSOR_ASOF;
975
976 error = hammer_btree_lookup(&cursor);
977 if (error == 0) {
978 error = hammer_btree_extract_data(&cursor);
979 if (error == 0)
980 config->config = cursor.data->config;
981 }
982 /* error can be ENOENT */
983 config->head.error = error;
984 hammer_done_cursor(&cursor);
985 return(0);
986 }
987
988 /*
989 * Retrieve the PFS hammer cleanup utility config record. This is
990 * different (newer than) the PFS config.
991 *
992 * This is kinda a hack.
993 */
static
int
hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
	      struct hammer_ioc_config *config)
{
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	hammer_mount_t hmp = ip->hmp;
	int error;

again:
	/*
	 * (Re)initialize the cursor.  We land here again after an EDEADLK
	 * from either the delete or the create step, restarting the whole
	 * delete-then-recreate sequence with a fresh cursor.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/* template leaf for the replacement CONFIG record */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	leaf.base.key = 0;	/* page 0 */
	leaf.data_len = sizeof(struct hammer_config_data);

	cursor.key_beg = leaf.base;

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/*
	 * Destroy any existing CONFIG record first so the create below
	 * replaces it.
	 */
	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		/*
		 * NOTE(review): the extract_data return value is
		 * immediately overwritten by the delete below --
		 * presumably the extract is only needed to make the
		 * record's data resident before destruction; confirm.
		 */
		error = hammer_btree_extract_data(&cursor);
		error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
						0, 0, 0, NULL);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	/* no pre-existing record is fine */
	if (error == ENOENT)
		error = 0;
	if (error == 0) {
		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&config->config,
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	/* per-operation status goes in head.error; ioctl itself succeeds */
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}
1057
1058 static
1059 int
hammer_ioc_get_data(hammer_transaction_t trans,hammer_inode_t ip,struct hammer_ioc_data * data)1060 hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
1061 struct hammer_ioc_data *data)
1062 {
1063 struct hammer_cursor cursor;
1064 int bytes;
1065 int error;
1066
1067 /* XXX cached inode ? */
1068 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
1069 if (error)
1070 goto failed;
1071
1072 cursor.key_beg = data->elm;
1073 cursor.flags |= HAMMER_CURSOR_BACKEND;
1074
1075 error = hammer_btree_lookup(&cursor);
1076 if (error == 0) {
1077 error = hammer_btree_extract_data(&cursor);
1078 if (error == 0) {
1079 data->leaf = *cursor.leaf;
1080 bytes = cursor.leaf->data_len;
1081 if (bytes > data->size)
1082 bytes = data->size;
1083 error = copyout(cursor.data, data->ubuf, bytes);
1084 }
1085 }
1086
1087 failed:
1088 hammer_done_cursor(&cursor);
1089 return (error);
1090 }
1091