1*96ed2ae4SDarrick J. Wong // SPDX-License-Identifier: GPL-2.0-or-later
2*96ed2ae4SDarrick J. Wong /*
3*96ed2ae4SDarrick J. Wong * Copyright (c) 2020-2024 Oracle. All Rights Reserved.
4*96ed2ae4SDarrick J. Wong * Author: Darrick J. Wong <djwong@kernel.org>
5*96ed2ae4SDarrick J. Wong */
6*96ed2ae4SDarrick J. Wong #include "xfs.h"
7*96ed2ae4SDarrick J. Wong #include "xfs_fs.h"
8*96ed2ae4SDarrick J. Wong #include "xfs_shared.h"
9*96ed2ae4SDarrick J. Wong #include "xfs_format.h"
10*96ed2ae4SDarrick J. Wong #include "xfs_trans_resv.h"
11*96ed2ae4SDarrick J. Wong #include "xfs_mount.h"
12*96ed2ae4SDarrick J. Wong #include "xfs_log_format.h"
13*96ed2ae4SDarrick J. Wong #include "xfs_trans.h"
14*96ed2ae4SDarrick J. Wong #include "xfs_inode.h"
15*96ed2ae4SDarrick J. Wong #include "xfs_quota.h"
16*96ed2ae4SDarrick J. Wong #include "xfs_qm.h"
17*96ed2ae4SDarrick J. Wong #include "xfs_icache.h"
18*96ed2ae4SDarrick J. Wong #include "xfs_bmap_util.h"
19*96ed2ae4SDarrick J. Wong #include "xfs_iwalk.h"
20*96ed2ae4SDarrick J. Wong #include "xfs_ialloc.h"
21*96ed2ae4SDarrick J. Wong #include "xfs_sb.h"
22*96ed2ae4SDarrick J. Wong #include "scrub/scrub.h"
23*96ed2ae4SDarrick J. Wong #include "scrub/common.h"
24*96ed2ae4SDarrick J. Wong #include "scrub/repair.h"
25*96ed2ae4SDarrick J. Wong #include "scrub/xfile.h"
26*96ed2ae4SDarrick J. Wong #include "scrub/xfarray.h"
27*96ed2ae4SDarrick J. Wong #include "scrub/iscan.h"
28*96ed2ae4SDarrick J. Wong #include "scrub/quota.h"
29*96ed2ae4SDarrick J. Wong #include "scrub/quotacheck.h"
30*96ed2ae4SDarrick J. Wong #include "scrub/trace.h"
31*96ed2ae4SDarrick J. Wong
32*96ed2ae4SDarrick J. Wong /*
33*96ed2ae4SDarrick J. Wong * Live Quotacheck Repair
34*96ed2ae4SDarrick J. Wong * ======================
35*96ed2ae4SDarrick J. Wong *
36*96ed2ae4SDarrick J. Wong * Use the live quota counter information that we collected to replace the
37*96ed2ae4SDarrick J. Wong * counter values in the incore dquots. A scrub->repair cycle should have left
38*96ed2ae4SDarrick J. Wong * the live data and hooks active, so this is safe so long as we make sure the
39*96ed2ae4SDarrick J. Wong * dquot is locked.
40*96ed2ae4SDarrick J. Wong */
41*96ed2ae4SDarrick J. Wong
/*
 * Commit new counters to a dquot.
 *
 * The caller must hold the dquot lock on entry; the dquot is returned locked
 * on all exit paths (even on error) so that the caller can drop its
 * reference.  Counters collected by the live scan are pushed into the incore
 * dquot and the change is logged and committed in a fresh transaction.
 */
static int
xqcheck_commit_dquot(
	struct xqcheck		*xqc,
	xfs_dqtype_t		dqtype,
	struct xfs_dquot	*dq)
{
	struct xqcheck_dquot	xcdq;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	int64_t			delta;
	bool			dirty = false;
	int			error = 0;

	/* Unlock the dquot just long enough to allocate a transaction. */
	xfs_dqunlock(dq);
	error = xchk_trans_alloc(xqc->sc, 0);
	xfs_dqlock(dq);
	if (error)
		return error;

	xfs_trans_dqjoin(xqc->sc->tp, dq);

	/*
	 * If the live scan was invalidated, our collected counters are stale
	 * and must not be written back.
	 */
	if (xchk_iscan_aborted(&xqc->iscan)) {
		error = -ECANCELED;
		goto out_cancel;
	}

	/* xqc->lock guards the shadow counter arrays against hook updates. */
	mutex_lock(&xqc->lock);
	error = xfarray_load_sparse(counts, dq->q_id, &xcdq);
	if (error)
		goto out_unlock;

	/* Adjust counters as needed. */
	delta = (int64_t)xcdq.icount - dq->q_ino.count;
	if (delta) {
		dq->q_ino.reserved += delta;
		dq->q_ino.count += delta;
		dirty = true;
	}

	delta = (int64_t)xcdq.bcount - dq->q_blk.count;
	if (delta) {
		dq->q_blk.reserved += delta;
		dq->q_blk.count += delta;
		dirty = true;
	}

	delta = (int64_t)xcdq.rtbcount - dq->q_rtb.count;
	if (delta) {
		dq->q_rtb.reserved += delta;
		dq->q_rtb.count += delta;
		dirty = true;
	}

	/*
	 * Mark this record as committed so that the second pass in
	 * xqcheck_commit_dqtype skips it.
	 */
	xcdq.flags |= (XQCHECK_DQUOT_REPAIR_SCANNED | XQCHECK_DQUOT_WRITTEN);
	error = xfarray_store(counts, dq->q_id, &xcdq);
	if (error == -EFBIG) {
		/*
		 * EFBIG means we tried to store data at too high a byte offset
		 * in the sparse array.  IOWs, we cannot complete the repair
		 * and must cancel the whole operation.  This should never
		 * happen, but we need to catch it anyway.
		 */
		error = -ECANCELED;
	}
	mutex_unlock(&xqc->lock);
	/* Nothing changed (or we failed): throw away the empty transaction. */
	if (error || !dirty)
		goto out_cancel;

	trace_xrep_quotacheck_dquot(xqc->sc->mp, dq->q_type, dq->q_id);

	/* Commit the dirty dquot to disk. */
	dq->q_flags |= XFS_DQFLAG_DIRTY;
	if (dq->q_id)
		xfs_qm_adjust_dqtimers(dq);
	xfs_trans_log_dquot(xqc->sc->tp, dq);

	/*
	 * Transaction commit unlocks the dquot, so we must re-lock it so that
	 * the caller can put the reference (which apparently requires a locked
	 * dquot).
	 */
	error = xrep_trans_commit(xqc->sc);
	xfs_dqlock(dq);
	return error;

out_unlock:
	mutex_unlock(&xqc->lock);
	/* fall through: cancel the transaction as well */
out_cancel:
	xchk_trans_cancel(xqc->sc);

	/* Re-lock the dquot so the caller can put the reference. */
	xfs_dqlock(dq);
	return error;
}
137*96ed2ae4SDarrick J. Wong
/*
 * Commit new quota counters for a particular quota type.
 *
 * Pass 1 walks every dquot already present in the quota file and updates its
 * counters.  Pass 2 walks the shadow counter array to create and update
 * dquots that the quota file did not previously know about.  Returns zero or
 * a negative errno.
 */
STATIC int
xqcheck_commit_dqtype(
	struct xqcheck		*xqc,
	unsigned int		dqtype)
{
	struct xchk_dqiter	cursor = { };
	struct xqcheck_dquot	xcdq;
	struct xfs_scrub	*sc = xqc->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	struct xfs_dquot	*dq;
	xfarray_idx_t		cur = XFARRAY_CURSOR_INIT;
	int			error;

	/*
	 * Update the counters of every dquot that the quota file knows about.
	 */
	xchk_dqiter_init(&cursor, sc, dqtype);
	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
		error = xqcheck_commit_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			break;
	}
	if (error)
		return error;

	/*
	 * Make a second pass to deal with the dquots that we know about but
	 * the quota file previously did not know about.
	 */
	mutex_lock(&xqc->lock);
	while ((error = xfarray_iter(counts, &cur, &xcdq)) == 1) {
		/*
		 * The iterator cursor has already advanced past the record it
		 * just loaded, so the record's array index (== the dquot id)
		 * is cur - 1.
		 */
		xfs_dqid_t	id = cur - 1;

		/* Pass 1 already handled this dquot; skip it. */
		if (xcdq.flags & XQCHECK_DQUOT_REPAIR_SCANNED)
			continue;

		/*
		 * Drop the shadow-array lock; dqget and the commit below take
		 * the dquot lock and may allocate, so we cannot hold xqc->lock
		 * across them.
		 */
		mutex_unlock(&xqc->lock);

		/*
		 * Grab the dquot, allowing for dquot block allocation in a
		 * separate transaction.  We committed the scrub transaction
		 * in a previous step, so we will not be creating nested
		 * transactions here.
		 */
		error = xfs_qm_dqget(mp, id, dqtype, true, &dq);
		if (error)
			return error;

		error = xqcheck_commit_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			return error;

		/* Re-take the lock before the next xfarray_iter call. */
		mutex_lock(&xqc->lock);
	}
	mutex_unlock(&xqc->lock);

	return error;
}
200*96ed2ae4SDarrick J. Wong
201*96ed2ae4SDarrick J. Wong /* Figure out quota CHKD flags for the running quota types. */
202*96ed2ae4SDarrick J. Wong static inline unsigned int
xqcheck_chkd_flags(struct xfs_mount * mp)203*96ed2ae4SDarrick J. Wong xqcheck_chkd_flags(
204*96ed2ae4SDarrick J. Wong struct xfs_mount *mp)
205*96ed2ae4SDarrick J. Wong {
206*96ed2ae4SDarrick J. Wong unsigned int ret = 0;
207*96ed2ae4SDarrick J. Wong
208*96ed2ae4SDarrick J. Wong if (XFS_IS_UQUOTA_ON(mp))
209*96ed2ae4SDarrick J. Wong ret |= XFS_UQUOTA_CHKD;
210*96ed2ae4SDarrick J. Wong if (XFS_IS_GQUOTA_ON(mp))
211*96ed2ae4SDarrick J. Wong ret |= XFS_GQUOTA_CHKD;
212*96ed2ae4SDarrick J. Wong if (XFS_IS_PQUOTA_ON(mp))
213*96ed2ae4SDarrick J. Wong ret |= XFS_PQUOTA_CHKD;
214*96ed2ae4SDarrick J. Wong return ret;
215*96ed2ae4SDarrick J. Wong }
216*96ed2ae4SDarrick J. Wong
217*96ed2ae4SDarrick J. Wong /* Commit the new dquot counters. */
218*96ed2ae4SDarrick J. Wong int
xrep_quotacheck(struct xfs_scrub * sc)219*96ed2ae4SDarrick J. Wong xrep_quotacheck(
220*96ed2ae4SDarrick J. Wong struct xfs_scrub *sc)
221*96ed2ae4SDarrick J. Wong {
222*96ed2ae4SDarrick J. Wong struct xqcheck *xqc = sc->buf;
223*96ed2ae4SDarrick J. Wong unsigned int qflags = xqcheck_chkd_flags(sc->mp);
224*96ed2ae4SDarrick J. Wong int error;
225*96ed2ae4SDarrick J. Wong
226*96ed2ae4SDarrick J. Wong /*
227*96ed2ae4SDarrick J. Wong * Clear the CHKD flag for the running quota types and commit the scrub
228*96ed2ae4SDarrick J. Wong * transaction so that we can allocate new quota block mappings if we
229*96ed2ae4SDarrick J. Wong * have to. If we crash after this point, the sb still has the CHKD
230*96ed2ae4SDarrick J. Wong * flags cleared, so mount quotacheck will fix all of this up.
231*96ed2ae4SDarrick J. Wong */
232*96ed2ae4SDarrick J. Wong xrep_update_qflags(sc, qflags, 0);
233*96ed2ae4SDarrick J. Wong error = xrep_trans_commit(sc);
234*96ed2ae4SDarrick J. Wong if (error)
235*96ed2ae4SDarrick J. Wong return error;
236*96ed2ae4SDarrick J. Wong
237*96ed2ae4SDarrick J. Wong /* Commit the new counters to the dquots. */
238*96ed2ae4SDarrick J. Wong if (xqc->ucounts) {
239*96ed2ae4SDarrick J. Wong error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_USER);
240*96ed2ae4SDarrick J. Wong if (error)
241*96ed2ae4SDarrick J. Wong return error;
242*96ed2ae4SDarrick J. Wong }
243*96ed2ae4SDarrick J. Wong if (xqc->gcounts) {
244*96ed2ae4SDarrick J. Wong error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_GROUP);
245*96ed2ae4SDarrick J. Wong if (error)
246*96ed2ae4SDarrick J. Wong return error;
247*96ed2ae4SDarrick J. Wong }
248*96ed2ae4SDarrick J. Wong if (xqc->pcounts) {
249*96ed2ae4SDarrick J. Wong error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_PROJ);
250*96ed2ae4SDarrick J. Wong if (error)
251*96ed2ae4SDarrick J. Wong return error;
252*96ed2ae4SDarrick J. Wong }
253*96ed2ae4SDarrick J. Wong
254*96ed2ae4SDarrick J. Wong /* Set the CHKD flags now that we've fixed quota counts. */
255*96ed2ae4SDarrick J. Wong error = xchk_trans_alloc(sc, 0);
256*96ed2ae4SDarrick J. Wong if (error)
257*96ed2ae4SDarrick J. Wong return error;
258*96ed2ae4SDarrick J. Wong
259*96ed2ae4SDarrick J. Wong xrep_update_qflags(sc, 0, qflags);
260*96ed2ae4SDarrick J. Wong return xrep_trans_commit(sc);
261*96ed2ae4SDarrick J. Wong }
262