xref: /minix/minix/servers/vfs/vnode.c (revision 83133719)
/* This file contains the routines related to vnodes.
 * The entry points are:
 *
 *  get_free_vnode - get a pointer to a free vnode object
 *  find_vnode - find a vnode according to the FS endpoint and the inode number
 *  dup_vnode - duplicate a vnode (i.e., increase its reference counter)
 *  put_vnode - drop a vnode (i.e., decrease its reference counter)
 */
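/* Editor's note (a sketch, not part of the original source): a vnode keeps two
 * counters.  v_ref_count is the number of references VFS itself holds on the
 * slot; v_fs_count mirrors how often the owning FS process has been told to
 * keep the inode open.  The entry points above are assumed to be used roughly
 * like this:
 *
 *	vp = find_vnode(fs_e, ino);	// inode already in the table?
 *	if (vp != NULL) dup_vnode(vp);	// yes: just add a reference
 *	else vp = get_free_vnode();	// no: claim and initialize a slot
 *	...				// use the vnode
 *	put_vnode(vp);			// drop the reference again
 */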

#include "fs.h"
#include "vnode.h"
#include "vmnt.h"
#include "file.h"
#include <minix/vfsif.h>
#include <assert.h>

/* Is vnode pointer reasonable? */
#if NDEBUG
#define SANEVP(v)
#define CHECKVN(v)
#define ASSERTVP(v)
#else
#define SANEVP(v) ((((v) >= &vnode[0] && (v) < &vnode[NR_VNODES])))

#define BADVP(v, f, l) printf("%s:%d: bad vp %p\n", f, l, v)

/* vp check that returns 0 for use in check_vrefs() */
#define CHECKVN(v) if(!SANEVP(v)) {				\
	BADVP(v, __FILE__, __LINE__);	\
	return 0;	\
}

/* vp check that panics */
#define ASSERTVP(v) if(!SANEVP(v)) { \
	BADVP(v, __FILE__, __LINE__); panic("bad vp"); }
#endif

#if LOCK_DEBUG
/*===========================================================================*
 *				check_vnode_locks_by_me			     *
 *===========================================================================*/
void check_vnode_locks_by_me(struct fproc *rfp)
{
/* Check whether this thread still has locks held on vnodes */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++) {
	if (tll_locked_by_me(&vp->v_lock)) {
		panic("Thread %d still holds vnode lock on vp %p call_nr=%d\n",
		      mthread_self(), vp, job_call_nr);
	}
  }

  if (rfp->fp_vp_rdlocks != 0)
	panic("Thread %d still holds read locks on a vnode (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vp_rdlocks, job_call_nr);
}
#endif

/*===========================================================================*
 *				check_vnode_locks			     *
 *===========================================================================*/
void check_vnode_locks()
{
  struct vnode *vp;
  int count = 0;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++)
	if (is_vnode_locked(vp)) {
		count++;
	}

  if (count) panic("%d locked vnodes\n", count);
#if 0
  printf("check_vnode_locks OK\n");
#endif
}

/*===========================================================================*
 *				get_free_vnode				     *
 *===========================================================================*/
struct vnode *get_free_vnode()
{
/* Find a free vnode slot in the vnode table (it's not actually allocated) */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	if (vp->v_ref_count == 0 && !is_vnode_locked(vp)) {
		vp->v_uid  = -1;
		vp->v_gid  = -1;
		vp->v_sdev = NO_DEV;
		vp->v_mapfs_e = NONE;
		vp->v_mapfs_count = 0;
		vp->v_mapinode_nr = 0;
		return(vp);
	}
  }

  err_code = ENFILE;
  return(NULL);
}
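
/* A minimal caller sketch (editor's addition, not in the original source):
 * since get_free_vnode() does not actually reserve the slot, the caller is
 * assumed to lock it and fill in the identifying fields before use.  The
 * helper name example_new_vnode() is hypothetical.
 */
#if 0
static struct vnode *example_new_vnode(int fs_e, ino_t ino)
{
  struct vnode *vp;

  if ((vp = get_free_vnode()) == NULL)
	return(NULL);			/* err_code has been set to ENFILE */

  lock_vnode(vp, VNODE_OPCL);		/* Claim the slot before using it */
  vp->v_fs_e = fs_e;
  vp->v_inode_nr = ino;
  vp->v_ref_count = 1;			/* First VFS reference */
  return(vp);
}
#endif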


/*===========================================================================*
 *				find_vnode				     *
 *===========================================================================*/
struct vnode *find_vnode(int fs_e, ino_t ino)
{
/* Find a specified (FS endpoint and inode number) vnode in the
 * vnode table */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp)
	if (vp->v_ref_count > 0 && vp->v_inode_nr == ino && vp->v_fs_e == fs_e)
		return(vp);

  return(NULL);
}
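
/* Sketch (editor's addition, not in the original source): find_vnode() can
 * also serve as a pure membership test, e.g. to tell whether any process
 * still has the inode open.  The helper name example_inode_in_use() is
 * hypothetical.
 */
#if 0
static int example_inode_in_use(int fs_e, ino_t ino)
{
  /* Non-NULL means some vnode slot still references this inode. */
  return(find_vnode(fs_e, ino) != NULL);
}
#endif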

/*===========================================================================*
 *				is_vnode_locked				     *
 *===========================================================================*/
int is_vnode_locked(struct vnode *vp)
{
/* Find out whether a thread holds a lock on this vnode or is trying to obtain
 * a lock. */
  ASSERTVP(vp);

  return(tll_islocked(&vp->v_lock) || tll_haspendinglock(&vp->v_lock));
}

/*===========================================================================*
 *				init_vnodes				     *
 *===========================================================================*/
void init_vnodes(void)
{
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	vp->v_fs_e = NONE;
	vp->v_mapfs_e = NONE;
	vp->v_inode_nr = 0;
	vp->v_ref_count = 0;
	vp->v_fs_count = 0;
	vp->v_mapfs_count = 0;
	tll_init(&vp->v_lock);
  }
}

/*===========================================================================*
 *				lock_vnode				     *
 *===========================================================================*/
int lock_vnode(struct vnode *vp, tll_access_t locktype)
{
  int r;

  ASSERTVP(vp);

  r = tll_lock(&vp->v_lock, locktype);

#if LOCK_DEBUG
  if (locktype == VNODE_READ) {
	fp->fp_vp_rdlocks++;
  }
#endif

  if (r == EBUSY) return(r);
  return(OK);
}
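
/* Usage sketch (editor's addition, not in the original source): lock_vnode()
 * appears to return EBUSY when the calling thread already holds a lock on
 * this vnode (compare put_vnode() below), so a caller should only unlock what
 * it acquired itself.  The helper name example_inspect_vnode() is
 * hypothetical.
 */
#if 0
static void example_inspect_vnode(struct vnode *vp)
{
  int r;

  r = lock_vnode(vp, VNODE_READ);

  /* ... read-only inspection of the vnode ... */

  if (r != EBUSY) unlock_vnode(vp);	/* Release only if we took the lock */
}
#endif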
171 
172 /*===========================================================================*
173  *				unlock_vnode				     *
174  *===========================================================================*/
175 void unlock_vnode(struct vnode *vp)
176 {
177 #if LOCK_DEBUG
178   int i;
179   register struct vnode *rvp;
180   struct worker_thread *w;
181 #endif
182   ASSERTVP(vp);
183 
184 #if LOCK_DEBUG
185   /* Decrease read-only lock counter when not locked as VNODE_OPCL or
186    * VNODE_WRITE */
187   if (!tll_locked_by_me(&vp->v_lock)) {
188 	fp->fp_vp_rdlocks--;
189   }
190 
191   for (i = 0; i < NR_VNODES; i++) {
192 	rvp = &vnode[i];
193 
194 	w = rvp->v_lock.t_write;
195 	assert(w != self);
196 	while (w && w->w_next != NULL) {
197 		w = w->w_next;
198 		assert(w != self);
199 	}
200 
201 	w = rvp->v_lock.t_serial;
202 	assert(w != self);
203 	while (w && w->w_next != NULL) {
204 		w = w->w_next;
205 		assert(w != self);
206 	}
207   }
208 #endif
209 
210   tll_unlock(&vp->v_lock);
211 }

/*===========================================================================*
 *				upgrade_vnode_lock			     *
 *===========================================================================*/
void upgrade_vnode_lock(struct vnode *vp)
{
  ASSERTVP(vp);
  tll_upgrade(&vp->v_lock);
}

/*===========================================================================*
 *				dup_vnode				     *
 *===========================================================================*/
void dup_vnode(struct vnode *vp)
{
/* dup_vnode() is called to increment the vnode's, and thereby the referred
 * inode's, reference counter.
 */
  ASSERTVP(vp);
  vp->v_ref_count++;
}
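
/* Sketch (editor's addition, not in the original source): each dup_vnode() is
 * assumed to be balanced by a later put_vnode(), for instance when a second
 * file pointer is made to refer to the same vnode.  The helper name
 * example_share_vnode() is hypothetical.
 */
#if 0
static void example_share_vnode(struct filp *f1, struct filp *f2)
{
  f2->filp_vno = f1->filp_vno;	/* Both filps now point at the same vnode */
  dup_vnode(f2->filp_vno);	/* ...so account for the extra reference */

  /* Later, when f2 is closed again: */
  put_vnode(f2->filp_vno);	/* Balanced by the matching put */
}
#endif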


/*===========================================================================*
 *				put_vnode				     *
 *===========================================================================*/
void put_vnode(struct vnode *vp)
{
/* Decrease the vnode's usage counter and decrease the inode's usage counter in
 * the corresponding FS process. Decreasing the fs_count each time we decrease
 * the ref count would lead to poor performance. Instead, only decrease
 * fs_count when the ref count hits zero. However, this could cause fs_count
 * to wrap. To prevent this, we drop the counter back to 1 once it exceeds 256.
 * We maintain fs_count as a sanity check to make sure VFS and the FS are in
 * sync.
 */
  int r, lock_vp;

  ASSERTVP(vp);

  /* Lock vnode. It's quite possible this thread already has a lock on this
   * vnode. That's no problem, because the reference counter will not decrease
   * to zero in that case. However, if the counter does decrease to zero *and*
   * is already locked, we have a consistency problem somewhere. */
  lock_vp = lock_vnode(vp, VNODE_OPCL);

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;
	if (vp->v_fs_count > 256)
		vnode_clean_refs(vp);
	if (lock_vp != EBUSY) unlock_vnode(vp);
	return;
  }

  /* If we already had a lock, there is a consistency problem */
  assert(lock_vp != EBUSY);
  upgrade_vnode_lock(vp); /* Acquire exclusive access */

  /* A vnode that's not in use can't be put back. */
  if (vp->v_ref_count <= 0)
	panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0)
	panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);

  /* Tell FS we don't need this inode to be open anymore. */
  r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);

  if (r != OK) {
	printf("VFS: putnode failed: %d\n", r);
	util_stacktrace();
  }

  /* This inode could've been mapped. If so, tell the mapped FS to close it as
   * well. If mapped onto the same FS, this putnode is not needed. */
  if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);

  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_mapfs_count = 0;

  unlock_vnode(vp);
}
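
/* Worked example (editor's illustration, not in the original source): suppose
 * a vnode has v_fs_count == 300 and v_ref_count == 2.  The first put_vnode()
 * only decrements v_ref_count to 1 and, because v_fs_count > 256, calls
 * vnode_clean_refs() below, which asks the FS to forget 299 references
 * (v_fs_count becomes 1).  The second put_vnode() then sends
 * req_putnode(..., 1), clears all counters, and releases the slot for reuse.
 */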


/*===========================================================================*
 *				vnode_clean_refs			     *
 *===========================================================================*/
void vnode_clean_refs(struct vnode *vp)
{
/* Tell the underlying FS to drop all references but one. */

  if (vp == NULL) return;
  if (vp->v_fs_count <= 1) return;	/* Nothing to do */

  /* Drop all references except one */
  req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1);
  vp->v_fs_count = 1;
}
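
/* Example (editor's illustration, not in the original source): with
 * v_fs_count == 42, the req_putnode() call above asks the FS to drop 41 of
 * its 42 references, leaving exactly one open reference, and v_fs_count is
 * reset to 1 to stay in sync.  VFS's own v_ref_count is not touched here.
 */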