// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
struct xfs_perag;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
        XFS_LOWSP_1_PCNT = 0,
        XFS_LOWSP_2_PCNT,
        XFS_LOWSP_3_PCNT,
        XFS_LOWSP_4_PCNT,
        XFS_LOWSP_5_PCNT,
        XFS_LOWSP_MAX,
};
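
/*
 * Example (illustrative sketch): xfs_set_low_space_thresholds(), declared
 * later in this header, precomputes these as roughly 1%..5% of the data
 * device size, so for a filesystem with sb_dblocks == 1000000:
 *
 *      m_low_space[XFS_LOWSP_1_PCNT] ~= 10000 blocks  (1%)
 *      m_low_space[XFS_LOWSP_5_PCNT] ~= 50000 blocks  (5%)
 *
 * Free space is compared against these thresholds to scale back speculative
 * preallocation as the filesystem fills up.
 */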

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
        XFS_ERR_METADATA,
        XFS_ERR_CLASS_MAX,
};
enum {
        XFS_ERR_DEFAULT,
        XFS_ERR_EIO,
        XFS_ERR_ENOSPC,
        XFS_ERR_ENODEV,
        XFS_ERR_ERRNO_MAX,
};

#define XFS_ERR_RETRY_FOREVER -1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day. So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855. Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
        struct xfs_kobj kobj;
        int max_retries;
        long retry_timeout; /* in jiffies, -1 = infinite */
};
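
/*
 * Example (illustrative sketch, not lifted from the XFS configuration code):
 * one configuration that retries up to 10 times or for 30 seconds, and one
 * that retries forever. msecs_to_jiffies() is the usual way to derive the
 * jiffies value; the XFS_ERR_RETRY_FOREVER sentinel selects unbounded retries.
 *
 *      struct xfs_error_cfg cfg = {
 *              .max_retries    = 10,
 *              .retry_timeout  = msecs_to_jiffies(30 * MSEC_PER_SEC),
 *      };
 *
 *      struct xfs_error_cfg forever = {
 *              .max_retries    = XFS_ERR_RETRY_FOREVER,
 *              .retry_timeout  = XFS_ERR_RETRY_FOREVER,
 *      };
 */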

/*
 * Per-cpu deferred inode inactivation GC lists.
 */
struct xfs_inodegc {
        struct xfs_mount *mp;
        struct llist_head list;
        struct delayed_work work;
        int error;

        /* approximate count of inodes in the list */
        unsigned int items;
        unsigned int shrinker_hits;
        unsigned int cpu;
};

/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified. We put the read-mostly
 * variables first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
        struct xfs_sb m_sb; /* copy of fs superblock */
        struct super_block *m_super;
        struct xfs_ail *m_ail; /* fs active log item list */
        struct xfs_buf *m_sb_bp; /* buffer for superblock */
        char *m_rtname; /* realtime device name */
        char *m_logname; /* external log device name */
        struct xfs_da_geometry *m_dir_geo; /* directory block geometry */
        struct xfs_da_geometry *m_attr_geo; /* attribute block geometry */
        struct xlog *m_log; /* log specific stuff */
        struct xfs_inode *m_rbmip; /* pointer to bitmap inode */
        struct xfs_inode *m_rsumip; /* pointer to summary inode */
        struct xfs_inode *m_rootip; /* pointer to root directory */
        struct xfs_quotainfo *m_quotainfo; /* disk quota information */
        struct xfs_buftarg *m_ddev_targp; /* data device */
        struct xfs_buftarg *m_logdev_targp; /* log device */
        struct xfs_buftarg *m_rtdev_targp; /* rt device */
        void __percpu *m_inodegc; /* percpu inodegc structures */

        /*
         * Optional cache of rt summary level per bitmap block with the
         * invariant that m_rsum_cache[bbno] > the maximum i for which
         * rsum[i][bbno] != 0, or 0 if rsum[i][bbno] == 0 for all i.
         * Reads and writes are serialized by the rsumip inode lock.
         */
        uint8_t *m_rsum_cache;
        struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
        struct workqueue_struct *m_buf_workqueue;
        struct workqueue_struct *m_unwritten_workqueue;
        struct workqueue_struct *m_reclaim_workqueue;
        struct workqueue_struct *m_sync_workqueue;
        struct workqueue_struct *m_blockgc_wq;
        struct workqueue_struct *m_inodegc_wq;

        int m_bsize; /* fs logical block size */
        uint8_t m_blkbit_log; /* blocklog + NBBY */
        uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
        uint8_t m_agno_log; /* log #ag's */
        uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
        int8_t m_rtxblklog; /* log2 of rextsize, if possible */
        uint m_blockmask; /* sb_blocksize-1 */
        uint m_blockwsize; /* sb_blocksize in words */
        uint m_blockwmask; /* blockwsize-1 */
        uint m_alloc_mxr[2]; /* max alloc btree records */
        uint m_alloc_mnr[2]; /* min alloc btree records */
        uint m_bmap_dmxr[2]; /* max bmap btree records */
        uint m_bmap_dmnr[2]; /* min bmap btree records */
        uint m_rmap_mxr[2]; /* max rmap btree records */
        uint m_rmap_mnr[2]; /* min rmap btree records */
        uint m_refc_mxr[2]; /* max refc btree records */
        uint m_refc_mnr[2]; /* min refc btree records */
        uint m_alloc_maxlevels; /* max alloc btree levels */
        uint m_bm_maxlevels[2]; /* max bmap btree levels */
        uint m_rmap_maxlevels; /* max rmap btree levels */
        uint m_refc_maxlevels; /* max refcount btree levels */
        unsigned int m_agbtree_maxlevels; /* max level of all AG btrees */
        xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */
        uint m_alloc_set_aside; /* space we can't use */
        uint m_ag_max_usable; /* max space per AG */
        int m_dalign; /* stripe unit */
        int m_swidth; /* stripe width */
        xfs_agnumber_t m_maxagi; /* highest inode alloc group */
        uint m_allocsize_log; /* min write size log bytes */
        uint m_allocsize_blocks; /* min write size blocks */
        int m_logbufs; /* number of log buffers */
        int m_logbsize; /* size of each log buffer */
        uint m_rsumlevels; /* rt summary levels */
        xfs_filblks_t m_rsumblocks; /* size of rt summary, FSBs */
        int m_fixedfsid[2]; /* unchanged for life of FS */
        uint m_qflags; /* quota status flags */
        uint64_t m_features; /* active filesystem features */
        uint64_t m_low_space[XFS_LOWSP_MAX]; /* low free space thresholds */
        uint64_t m_low_rtexts[XFS_LOWSP_MAX];
        uint64_t m_rtxblkmask; /* rt extent block mask */
        struct xfs_ino_geometry m_ino_geo; /* inode geometry */
        struct xfs_trans_resv m_resv; /* precomputed res values */
        unsigned long m_opstate; /* dynamic state flags */
        bool m_always_cow;
        bool m_fail_unmount;
        bool m_finobt_nores; /* no per-AG finobt resv. */
        bool m_update_sb; /* sb needs update in mount */

        /*
         * Bitsets of per-fs metadata that have been checked and/or are sick.
         * Callers must hold m_sb_lock to access these two fields.
         */
        uint8_t m_fs_checked;
        uint8_t m_fs_sick;
        /*
         * Bitsets of rt metadata that have been checked and/or are sick.
         * Callers must hold m_sb_lock to access these two fields.
         */
        uint8_t m_rt_checked;
        uint8_t m_rt_sick;

        /*
         * End of read-mostly variables. Frequently written variables and locks
         * should be placed below this comment from now on. The first variable
         * here is marked as cacheline aligned so that it is separated from
         * the read-mostly variables.
         */

        spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
        struct percpu_counter m_icount; /* allocated inodes counter */
        struct percpu_counter m_ifree; /* free inodes counter */
        struct percpu_counter m_fdblocks; /* free block counter */
        struct percpu_counter m_frextents; /* free rt extent counter */

        /*
         * Count of data device blocks reserved for delayed allocations,
         * including indlen blocks. Does not include allocated CoW staging
         * extents or anything related to the rt device.
         */
        struct percpu_counter m_delalloc_blks;

        /*
         * RT version of the above.
         */
        struct percpu_counter m_delalloc_rtextents;

        /*
         * Global count of allocation btree blocks in use across all AGs. Only
         * used when perag reservation is enabled. Helps prevent block
         * reservation from attempting to reserve allocation btree blocks.
         */
        atomic64_t m_allocbt_blks;

        struct xarray m_perags; /* per-ag accounting info */
        uint64_t m_resblks; /* total reserved blocks */
        uint64_t m_resblks_avail; /* available reserved blocks */
        uint64_t m_resblks_save; /* reserved blks @ remount,ro */
        struct delayed_work m_reclaim_work; /* background inode reclaim */
        struct dentry *m_debugfs; /* debugfs parent */
        struct xfs_kobj m_kobj;
        struct xfs_kobj m_error_kobj;
        struct xfs_kobj m_error_meta_kobj;
        struct xfs_error_cfg m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
        struct xstats m_stats; /* per-fs stats */
#ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
        struct xchk_stats *m_scrub_stats;
#endif
        xfs_agnumber_t m_agfrotor; /* last ag where space found */
        atomic_t m_agirotor; /* last ag dir inode alloced */

        /* Memory shrinker to throttle and reprioritize inodegc */
        struct shrinker *m_inodegc_shrinker;
        /*
         * Workqueue item so that we can coalesce multiple inode flush attempts
         * into a single flush.
         */
        struct work_struct m_flush_inodes_work;

        /*
         * Generation of the filesystem layout. This is incremented by each
         * growfs, and used by the pNFS server to ensure the client updates
         * its view of the block device once it gets a layout that might
         * reference the newly added blocks. Does not need to be persistent
         * as long as we only allow file system size increments, but if we
         * ever support shrinks it would have to be persisted in addition
         * to various other kinds of pain inflicted on the pNFS server.
         */
        uint32_t m_generation;
        struct mutex m_growlock; /* growfs mutex */

#ifdef DEBUG
        /*
         * Frequency with which errors are injected. Replaces xfs_etest; the
         * value stored in here is the inverse of the frequency with which the
         * error triggers. 1 = always, 2 = half the time, etc.
         */
        unsigned int *m_errortag;
        struct xfs_kobj m_errortag_kobj;
#endif

        /* cpus that have inodes queued for inactivation */
        struct cpumask m_inodegc_cpumask;

        /* Hook to feed dirent updates to an active online repair. */
        struct xfs_hooks m_dir_update_hooks;
} xfs_mount_t;

#define M_IGEO(mp) (&(mp)->m_ino_geo)

/*
 * Flags for m_features.
 *
 * These are all the active features in the filesystem, regardless of how
 * they are configured.
 */
#define XFS_FEAT_ATTR (1ULL << 0) /* xattrs present in fs */
#define XFS_FEAT_NLINK (1ULL << 1) /* 32 bit link counts */
#define XFS_FEAT_QUOTA (1ULL << 2) /* quota active */
#define XFS_FEAT_ALIGN (1ULL << 3) /* inode alignment */
#define XFS_FEAT_DALIGN (1ULL << 4) /* data alignment */
#define XFS_FEAT_LOGV2 (1ULL << 5) /* version 2 logs */
#define XFS_FEAT_SECTOR (1ULL << 6) /* sector size > 512 bytes */
#define XFS_FEAT_EXTFLG (1ULL << 7) /* unwritten extents */
#define XFS_FEAT_ASCIICI (1ULL << 8) /* ASCII only case-insens. */
#define XFS_FEAT_LAZYSBCOUNT (1ULL << 9) /* Superblk counters */
#define XFS_FEAT_ATTR2 (1ULL << 10) /* dynamic attr fork */
#define XFS_FEAT_PARENT (1ULL << 11) /* parent pointers */
#define XFS_FEAT_PROJID32 (1ULL << 12) /* 32 bit project id */
#define XFS_FEAT_CRC (1ULL << 13) /* metadata CRCs */
#define XFS_FEAT_V3INODES (1ULL << 14) /* Version 3 inodes */
#define XFS_FEAT_PQUOTINO (1ULL << 15) /* non-shared proj/grp quotas */
#define XFS_FEAT_FTYPE (1ULL << 16) /* inode type in dir */
#define XFS_FEAT_FINOBT (1ULL << 17) /* free inode btree */
#define XFS_FEAT_RMAPBT (1ULL << 18) /* reverse map btree */
#define XFS_FEAT_REFLINK (1ULL << 19) /* reflinked files */
#define XFS_FEAT_SPINODES (1ULL << 20) /* sparse inode chunks */
#define XFS_FEAT_META_UUID (1ULL << 21) /* metadata UUID */
#define XFS_FEAT_REALTIME (1ULL << 22) /* realtime device present */
#define XFS_FEAT_INOBTCNT (1ULL << 23) /* inobt block counts */
#define XFS_FEAT_BIGTIME (1ULL << 24) /* large timestamps */
#define XFS_FEAT_NEEDSREPAIR (1ULL << 25) /* needs xfs_repair */
#define XFS_FEAT_NREXT64 (1ULL << 26) /* large extent counters */
#define XFS_FEAT_EXCHANGE_RANGE (1ULL << 27) /* exchange range */

/* Mount features */
#define XFS_FEAT_NOATTR2 (1ULL << 48) /* disable attr2 creation */
#define XFS_FEAT_NOALIGN (1ULL << 49) /* ignore alignment */
#define XFS_FEAT_ALLOCSIZE (1ULL << 50) /* user specified allocation size */
#define XFS_FEAT_LARGE_IOSIZE (1ULL << 51) /* report large preferred
                                            * I/O size in stat() */
#define XFS_FEAT_WSYNC (1ULL << 52) /* synchronous metadata ops */
#define XFS_FEAT_DIRSYNC (1ULL << 53) /* synchronous directory ops */
#define XFS_FEAT_DISCARD (1ULL << 54) /* discard unused blocks */
#define XFS_FEAT_GRPID (1ULL << 55) /* group-ID assigned from directory */
#define XFS_FEAT_SMALL_INUMS (1ULL << 56) /* user wants 32bit inodes */
#define XFS_FEAT_IKEEP (1ULL << 57) /* keep empty inode clusters */
#define XFS_FEAT_SWALLOC (1ULL << 58) /* stripe width allocation */
#define XFS_FEAT_FILESTREAMS (1ULL << 59) /* use filestreams allocator */
#define XFS_FEAT_DAX_ALWAYS (1ULL << 60) /* DAX always enabled */
#define XFS_FEAT_DAX_NEVER (1ULL << 61) /* DAX never enabled */
#define XFS_FEAT_NORECOVERY (1ULL << 62) /* no recovery - dirty fs */
#define XFS_FEAT_NOUUID (1ULL << 63) /* ignore uuid during mount */

#define __XFS_HAS_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
        return mp->m_features & XFS_FEAT_ ## NAME; \
}

/* Some features can be added dynamically so they need a set wrapper, too. */
#define __XFS_ADD_FEAT(name, NAME) \
        __XFS_HAS_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
        mp->m_features |= XFS_FEAT_ ## NAME; \
        xfs_sb_version_add ## name(&mp->m_sb); \
}
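
/*
 * Example (illustrative): __XFS_ADD_FEAT(quota, QUOTA) below expands to both
 * a predicate and a setter, roughly:
 *
 *      static inline bool xfs_has_quota(struct xfs_mount *mp)
 *      {
 *              return mp->m_features & XFS_FEAT_QUOTA;
 *      }
 *
 *      static inline void xfs_add_quota(struct xfs_mount *mp)
 *      {
 *              mp->m_features |= XFS_FEAT_QUOTA;
 *              xfs_sb_version_addquota(&mp->m_sb);
 *      }
 *
 * so callers test features with e.g. "if (xfs_has_reflink(mp))" rather than
 * open-coding checks against m_features.
 */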

/* Superblock features */
__XFS_ADD_FEAT(attr, ATTR)
__XFS_HAS_FEAT(nlink, NLINK)
__XFS_ADD_FEAT(quota, QUOTA)
__XFS_HAS_FEAT(dalign, DALIGN)
__XFS_HAS_FEAT(sector, SECTOR)
__XFS_HAS_FEAT(asciici, ASCIICI)
__XFS_HAS_FEAT(parent, PARENT)
__XFS_HAS_FEAT(ftype, FTYPE)
__XFS_HAS_FEAT(finobt, FINOBT)
__XFS_HAS_FEAT(rmapbt, RMAPBT)
__XFS_HAS_FEAT(reflink, REFLINK)
__XFS_HAS_FEAT(sparseinodes, SPINODES)
__XFS_HAS_FEAT(metauuid, META_UUID)
__XFS_HAS_FEAT(realtime, REALTIME)
__XFS_HAS_FEAT(inobtcounts, INOBTCNT)
__XFS_HAS_FEAT(bigtime, BIGTIME)
__XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
__XFS_HAS_FEAT(large_extent_counts, NREXT64)
__XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)

/*
 * Some features are always on for v5 file systems, allow the compiler to
 * eliminate dead code when building without v4 support.
 */
#define __XFS_HAS_V4_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
        return !IS_ENABLED(CONFIG_XFS_SUPPORT_V4) || \
                (mp->m_features & XFS_FEAT_ ## NAME); \
}

#define __XFS_ADD_V4_FEAT(name, NAME) \
        __XFS_HAS_V4_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
        if (IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) { \
                mp->m_features |= XFS_FEAT_ ## NAME; \
                xfs_sb_version_add ## name(&mp->m_sb); \
        } \
}

__XFS_HAS_V4_FEAT(align, ALIGN)
__XFS_HAS_V4_FEAT(logv2, LOGV2)
__XFS_HAS_V4_FEAT(extflg, EXTFLG)
__XFS_HAS_V4_FEAT(lazysbcount, LAZYSBCOUNT)
__XFS_ADD_V4_FEAT(attr2, ATTR2)
__XFS_ADD_V4_FEAT(projid32, PROJID32)
__XFS_HAS_V4_FEAT(v3inodes, V3INODES)
__XFS_HAS_V4_FEAT(crc, CRC)
__XFS_HAS_V4_FEAT(pquotino, PQUOTINO)

/*
 * Mount features
 *
 * These do not change dynamically - features that can come and go, such as 32
 * bit inodes and read-only state, are kept as operational state rather than
 * features.
 */
__XFS_HAS_FEAT(noattr2, NOATTR2)
__XFS_HAS_FEAT(noalign, NOALIGN)
__XFS_HAS_FEAT(allocsize, ALLOCSIZE)
__XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE)
__XFS_HAS_FEAT(wsync, WSYNC)
__XFS_HAS_FEAT(dirsync, DIRSYNC)
__XFS_HAS_FEAT(discard, DISCARD)
__XFS_HAS_FEAT(grpid, GRPID)
__XFS_HAS_FEAT(small_inums, SMALL_INUMS)
__XFS_HAS_FEAT(ikeep, IKEEP)
__XFS_HAS_FEAT(swalloc, SWALLOC)
__XFS_HAS_FEAT(filestreams, FILESTREAMS)
__XFS_HAS_FEAT(dax_always, DAX_ALWAYS)
__XFS_HAS_FEAT(dax_never, DAX_NEVER)
__XFS_HAS_FEAT(norecovery, NORECOVERY)
__XFS_HAS_FEAT(nouuid, NOUUID)

/*
 * Operational mount state flags
 *
 * Use these with atomic bit ops only!
 */
#define XFS_OPSTATE_UNMOUNTING 0 /* filesystem is unmounting */
#define XFS_OPSTATE_CLEAN 1 /* mount was clean */
#define XFS_OPSTATE_SHUTDOWN 2 /* stop all fs operations */
#define XFS_OPSTATE_INODE32 3 /* inode32 allocator active */
#define XFS_OPSTATE_READONLY 4 /* read-only fs */

/*
 * If set, inactivation worker threads will be scheduled to process queued
 * inodegc work. If not, queued inodes remain in memory waiting to be
 * processed.
 */
#define XFS_OPSTATE_INODEGC_ENABLED 5
/*
 * If set, background speculative prealloc gc worker threads will be scheduled
 * to process queued blockgc work. If not, inodes retain their preallocations
 * until explicitly deleted.
 */
#define XFS_OPSTATE_BLOCKGC_ENABLED 6

/* Kernel has logged a warning about online fsck being used on this fs. */
#define XFS_OPSTATE_WARNED_SCRUB 7
/* Kernel has logged a warning about shrink being used on this fs. */
#define XFS_OPSTATE_WARNED_SHRINK 8
/* Kernel has logged a warning about logged xattr updates being used. */
#define XFS_OPSTATE_WARNED_LARP 9
/* Mount time quotacheck is running */
#define XFS_OPSTATE_QUOTACHECK_RUNNING 10
/* Do we want to clear log incompat flags? */
#define XFS_OPSTATE_UNSET_LOG_INCOMPAT 11
/* Filesystem can use logged extended attributes */
#define XFS_OPSTATE_USE_LARP 12

#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
        return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
{ \
        return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
{ \
        return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
}
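
/*
 * Example (illustrative): __XFS_IS_OPSTATE(shutdown, SHUTDOWN) below generates
 * xfs_is_shutdown(), xfs_set_shutdown() and xfs_clear_shutdown(). The common
 * pattern for bailing out of an operation is simply:
 *
 *      if (xfs_is_shutdown(mp))
 *              return -EIO;
 *
 * The set/clear variants return the previous bit value, so callers can detect
 * the first transition, e.g. "if (!xfs_set_shutdown(mp)) ...".
 */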

__XFS_IS_OPSTATE(unmounting, UNMOUNTING)
__XFS_IS_OPSTATE(clean, CLEAN)
__XFS_IS_OPSTATE(shutdown, SHUTDOWN)
__XFS_IS_OPSTATE(inode32, INODE32)
__XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
#ifdef CONFIG_XFS_QUOTA
__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
#else
# define xfs_is_quotacheck_running(mp) (false)
#endif
__XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
__XFS_IS_OPSTATE(using_logged_xattrs, USE_LARP)

static inline bool
xfs_should_warn(struct xfs_mount *mp, long nr)
{
        return !test_and_set_bit(nr, &mp->m_opstate);
}
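
/*
 * Example (illustrative): the WARNED_* bits above combine with this helper to
 * emit a given warning only once per mount, roughly:
 *
 *      if (xfs_should_warn(mp, XFS_OPSTATE_WARNED_SHRINK))
 *              xfs_warn(mp, "online shrink is EXPERIMENTAL, use at your own risk!");
 */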

#define XFS_OPSTATE_STRINGS \
        { (1UL << XFS_OPSTATE_UNMOUNTING), "unmounting" }, \
        { (1UL << XFS_OPSTATE_CLEAN), "clean" }, \
        { (1UL << XFS_OPSTATE_SHUTDOWN), "shutdown" }, \
        { (1UL << XFS_OPSTATE_INODE32), "inode32" }, \
        { (1UL << XFS_OPSTATE_READONLY), "read_only" }, \
        { (1UL << XFS_OPSTATE_INODEGC_ENABLED), "inodegc" }, \
        { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" }, \
        { (1UL << XFS_OPSTATE_WARNED_SCRUB), "wscrub" }, \
        { (1UL << XFS_OPSTATE_WARNED_SHRINK), "wshrink" }, \
        { (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }, \
        { (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }, \
        { (1UL << XFS_OPSTATE_UNSET_LOG_INCOMPAT), "unset_log_incompat" }, \
        { (1UL << XFS_OPSTATE_USE_LARP), "logged_xattrs" }

/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG 30 /* 1G */
#define XFS_MIN_IO_LOG PAGE_SHIFT

void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname,
                int lnnum);
#define xfs_force_shutdown(m,f) \
        xfs_do_force_shutdown(m, f, __FILE__, __LINE__)
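
/*
 * Example (illustrative): a fatal log write error shuts the filesystem down,
 * and the wrapper records the failing file and line for the shutdown message:
 *
 *      if (error)
 *              xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 */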

#define SHUTDOWN_META_IO_ERROR (1u << 0) /* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR (1u << 1) /* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT (1u << 2) /* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE (1u << 3) /* corrupt in-memory structures */
#define SHUTDOWN_CORRUPT_ONDISK (1u << 4) /* corrupt metadata on device */
#define SHUTDOWN_DEVICE_REMOVED (1u << 5) /* device removed underneath us */

#define XFS_SHUTDOWN_STRINGS \
        { SHUTDOWN_META_IO_ERROR, "metadata_io" }, \
        { SHUTDOWN_LOG_IO_ERROR, "log_io" }, \
        { SHUTDOWN_FORCE_UMOUNT, "force_umount" }, \
        { SHUTDOWN_CORRUPT_INCORE, "corruption" }, \
        { SHUTDOWN_DEVICE_REMOVED, "device_removed" }

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
        xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
        do_div(ld, mp->m_sb.sb_agblocks);
        return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
        xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
        return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
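
/*
 * Worked example (illustrative): with 4096-byte filesystem blocks (so one
 * fsblock is 8 basic 512-byte blocks) and sb_agblocks == 100000, the daddr
 * 1600016 converts as follows:
 *
 *      fsbno = 1600016 / 8      = 200002
 *      agno  = 200002 / 100000  = 2
 *      agbno = 200002 % 100000  = 2
 *
 * i.e. the address is two filesystem blocks into AG 2.
 */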

extern void xfs_uuid_table_free(void);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
extern void xfs_unmountfs(xfs_mount_t *);

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (page-at-a-time updates). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH 1024
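
/*
 * Illustrative sketch of how the batch size is applied (the real update path
 * lives in xfs_dec_freecounter()): small deltas stay in the per-cpu batches,
 * and comparisons only fall back to the precise sum when the counter gets
 * close to the unusable-space limit:
 *
 *      percpu_counter_add_batch(&mp->m_fdblocks, delta, XFS_FDBLOCKS_BATCH);
 *      if (__percpu_counter_compare(&mp->m_fdblocks, set_aside,
 *                                   XFS_FDBLOCKS_BATCH) < 0) {
 *              ... undo the update and return -ENOSPC ...
 *      }
 *
 * where "set_aside" stands for whatever limit the caller checks against
 * (see xfs_fdblocks_unavailable() below).
 */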

/*
 * Estimate the amount of free space that is not available to userspace and is
 * not explicitly reserved from the incore fdblocks. This includes:
 *
 * - The minimum number of blocks needed to support splitting a bmap btree
 * - The blocks currently in use by the freespace btrees because they record
 *   the actual blocks that will fill per-AG metadata space reservations
 */
static inline uint64_t
xfs_fdblocks_unavailable(
        struct xfs_mount *mp)
{
        return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
}

int xfs_dec_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
                uint64_t delta, bool rsvd);
void xfs_add_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
                uint64_t delta);

static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
                bool reserved)
{
        return xfs_dec_freecounter(mp, &mp->m_fdblocks, delta, reserved);
}

static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
{
        xfs_add_freecounter(mp, &mp->m_fdblocks, delta);
}

static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
{
        return xfs_dec_freecounter(mp, &mp->m_frextents, delta, false);
}

static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
{
        xfs_add_freecounter(mp, &mp->m_frextents, delta);
}
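
/*
 * Example (illustrative): reserve blocks up front and give them back if the
 * operation is abandoned. xfs_dec_fdblocks() fails with -ENOSPC when the
 * counter cannot cover the delta; the "reserved" argument controls whether
 * the caller may dip into the reserved block pool once free space runs out:
 *
 *      error = xfs_dec_fdblocks(mp, nblocks, false);
 *      if (error)
 *              return error;
 *      ...
 *      if (canceled)
 *              xfs_add_fdblocks(mp, nblocks);
 */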

extern int xfs_readsb(xfs_mount_t *, int);
extern void xfs_freesb(xfs_mount_t *);
extern bool xfs_fs_writable(struct xfs_mount *mp, int level);
extern int xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void xfs_set_low_space_thresholds(struct xfs_mount *);

int xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
                xfs_off_t count_fsb);

struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
                int error_class, int error);
void xfs_force_summary_recalc(struct xfs_mount *mp);
int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature);
bool xfs_clear_incompat_log_features(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_inode *ip, int64_t data_delta,
                int64_t ind_delta);

#endif /* __XFS_MOUNT_H__ */