xref: /minix/minix/servers/vfs/vnode.c (revision 045e0ed3)
/* This file contains the routines related to vnodes.
 * The entry points are:
 *
 *  get_free_vnode - get a pointer to a free vnode object
 *  find_vnode - find a vnode by FS endpoint and inode number
 *  is_vnode_locked - check whether a vnode is locked or has a pending lock
 *  init_vnodes - initialize the vnode table
 *  lock_vnode - lock a vnode with the given access type
 *  unlock_vnode - unlock a vnode
 *  upgrade_vnode_lock - upgrade a vnode lock to exclusive access
 *  dup_vnode - duplicate a vnode (i.e., increase its reference counter)
 *  put_vnode - drop a vnode (i.e., decrease its reference counter)
 *  vnode_clean_refs - tell the FS to drop all but one inode reference
 */
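/* A typical caller combines these entry points roughly as follows. This is an
 * illustrative sketch only: locking, error handling, and the surrounding
 * request code are omitted, and the variable names are hypothetical.
 *
 *	struct vnode *vp;
 *
 *	if ((vp = find_vnode(fs_e, ino)) != NULL)
 *		dup_vnode(vp);		   <- vnode already in use; bump count
 *	else if ((vp = get_free_vnode()) == NULL)
 *		return(err_code);	   <- table full; err_code is ENFILE
 *	else
 *		...			   <- caller initializes the new slot
 *
 *	put_vnode(vp);			   <- done; drop the reference again
 */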

#include "fs.h"
#include "vnode.h"
#include "vmnt.h"
#include "file.h"
#include <minix/vfsif.h>
#include <assert.h>

/* Is vnode pointer reasonable? */
#if NDEBUG
#define SANEVP(v)
#define CHECKVN(v)
#define ASSERTVP(v)
#else
#define SANEVP(v) ((((v) >= &vnode[0] && (v) < &vnode[NR_VNODES])))

#define BADVP(v, f, l) printf("%s:%d: bad vp %p\n", f, l, v)

/* vp check that returns 0 for use in check_vrefs() */
#define CHECKVN(v) if(!SANEVP(v)) {				\
	BADVP(v, __FILE__, __LINE__);	\
	return 0;	\
}

/* vp check that panics */
#define ASSERTVP(v) if(!SANEVP(v)) { \
	BADVP(v, __FILE__, __LINE__); panic("bad vp"); }
#endif

#if LOCK_DEBUG
/*===========================================================================*
 *				check_vnode_locks_by_me			     *
 *===========================================================================*/
void check_vnode_locks_by_me(struct fproc *rfp)
{
/* Check whether this thread still has locks held on vnodes */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++) {
	if (tll_locked_by_me(&vp->v_lock)) {
		panic("Thread %d still holds vnode lock on vp %p call_nr=%d\n",
		      mthread_self(), vp, job_call_nr);
	}
  }

  if (rfp->fp_vp_rdlocks != 0)
	panic("Thread %d still holds read locks on a vnode (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vp_rdlocks, job_call_nr);
}
#endif

/*===========================================================================*
 *				check_vnode_locks			     *
 *===========================================================================*/
void
check_vnode_locks(void)
{
  struct vnode *vp;
  int count = 0;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++)
	if (is_vnode_locked(vp)) {
		count++;
	}

  if (count) panic("%d locked vnodes\n", count);
#if 0
  printf("check_vnode_locks OK\n");
#endif
}

/*===========================================================================*
 *				get_free_vnode				     *
 *===========================================================================*/
struct vnode *
get_free_vnode(void)
{
/* Find a free vnode slot in the vnode table. The slot is not claimed here;
 * the caller is expected to fill in its fields and set its reference count.
 */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	if (vp->v_ref_count == 0 && !is_vnode_locked(vp)) {
		vp->v_uid  = -1;
		vp->v_gid  = -1;
		vp->v_sdev = NO_DEV;
		vp->v_mapfs_e = NONE;
		vp->v_mapfs_count = 0;
		vp->v_mapinode_nr = 0;
		return(vp);
	}
  }

  err_code = ENFILE;
  return(NULL);
}


/*===========================================================================*
 *				find_vnode				     *
 *===========================================================================*/
struct vnode *find_vnode(int fs_e, ino_t ino)
{
/* Find the in-use vnode identified by the given FS endpoint and inode number,
 * if any, in the vnode table. */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp)
	if (vp->v_ref_count > 0 && vp->v_inode_nr == ino && vp->v_fs_e == fs_e)
		return(vp);

  return(NULL);
}

/*===========================================================================*
 *				is_vnode_locked				     *
 *===========================================================================*/
int is_vnode_locked(struct vnode *vp)
{
/* Find out whether a thread holds a lock on this vnode or is trying to obtain
 * a lock. */
  ASSERTVP(vp);

  return(tll_islocked(&vp->v_lock) || tll_haspendinglock(&vp->v_lock));
}

/*===========================================================================*
 *				init_vnodes				     *
 *===========================================================================*/
void init_vnodes(void)
{
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	vp->v_fs_e = NONE;
	vp->v_mapfs_e = NONE;
	vp->v_inode_nr = 0;
	vp->v_ref_count = 0;
	vp->v_fs_count = 0;
	vp->v_mapfs_count = 0;
	tll_init(&vp->v_lock);
  }
}

/*===========================================================================*
 *				lock_vnode				     *
 *===========================================================================*/
int lock_vnode(struct vnode *vp, tll_access_t locktype)
{
/* Lock a vnode with the requested access type. An EBUSY result indicates that
 * this thread already held a lock on the vnode (cf. put_vnode); it is passed
 * on to the caller, and any other result is reported as OK. */
  int r;

  ASSERTVP(vp);

  r = tll_lock(&vp->v_lock, locktype);

#if LOCK_DEBUG
  if (locktype == VNODE_READ) {
	fp->fp_vp_rdlocks++;
  }
#endif

  if (r == EBUSY) return(r);
  return(OK);
}

/*===========================================================================*
 *				unlock_vnode				     *
 *===========================================================================*/
void unlock_vnode(struct vnode *vp)
{
/* Release the lock held on this vnode. With LOCK_DEBUG enabled, also verify
 * that this thread is not still queued waiting on any vnode lock. */
#if LOCK_DEBUG
  int i;
  register struct vnode *rvp;
  struct worker_thread *w;
#endif
  ASSERTVP(vp);

#if LOCK_DEBUG
  /* Decrease read-only lock counter when not locked as VNODE_OPCL or
   * VNODE_WRITE */
  if (!tll_locked_by_me(&vp->v_lock)) {
	fp->fp_vp_rdlocks--;
  }

  for (i = 0; i < NR_VNODES; i++) {
	rvp = &vnode[i];

	w = rvp->v_lock.t_write;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}

	w = rvp->v_lock.t_serial;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}
  }
#endif

  tll_unlock(&vp->v_lock);
}

/*===========================================================================*
 *				upgrade_vnode_lock			     *
 *===========================================================================*/
void upgrade_vnode_lock(struct vnode *vp)
{
/* Upgrade the lock held on this vnode to exclusive (write) access. */
  ASSERTVP(vp);
  tll_upgrade(&vp->v_lock);
}

/*===========================================================================*
 *				dup_vnode				     *
 *===========================================================================*/
void dup_vnode(struct vnode *vp)
{
/* dup_vnode() is called to take an extra reference on a vnode, i.e. to
 * increment its reference counter on behalf of one more user of the
 * underlying inode.
 */
  ASSERTVP(vp);
  vp->v_ref_count++;
}


/*===========================================================================*
 *				put_vnode				     *
 *===========================================================================*/
void put_vnode(struct vnode *vp)
{
/* Decrease the vnode's usage counter and decrease the inode's usage counter
 * in the corresponding FS process. Decreasing fs_count each time we decrease
 * the ref count would lead to poor performance. Instead, fs_count is only
 * decreased when the ref count hits zero. However, this could cause fs_count
 * to wrap. To prevent that, we drop the counter back to 1 once it exceeds
 * 256. We maintain fs_count as a sanity check to make sure VFS and the FS
 * are in sync.
 */
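/* Note on the wrap prevention described above: as long as the vnode stays in
 * use, v_fs_count can only grow. Once it exceeds 256, the branch below calls
 * vnode_clean_refs(), which asks the FS to drop all but one of those
 * references in a single putnode request, so the counter cannot wrap.
 */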
  int r, lock_vp;

  ASSERTVP(vp);

  /* Lock the vnode. It's quite possible this thread already has a lock on
   * this vnode. That's no problem, because the reference counter will not
   * decrease to zero in that case. However, if the counter does decrease to
   * zero *and* the vnode was already locked, we have a consistency problem
   * somewhere. */
  lock_vp = lock_vnode(vp, VNODE_OPCL);

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;
	if (vp->v_fs_count > 256)
		vnode_clean_refs(vp);
	if (lock_vp != EBUSY) unlock_vnode(vp);
	return;
  }

  /* If we already had a lock, there is a consistency problem */
  assert(lock_vp != EBUSY);
  upgrade_vnode_lock(vp); /* Acquire exclusive access */

  /* A vnode that's not in use can't be put back. */
  if (vp->v_ref_count <= 0)
	panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0)
	panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);

  /* Tell the FS we don't need this inode to be open anymore. */
  r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);

  if (r != OK) {
	printf("VFS: putnode failed: %d\n", r);
	util_stacktrace();
  }

  /* This inode could have been mapped. If so, tell the mapped FS to close it
   * as well. If it is mapped onto the same FS, this second putnode is not
   * needed. */
  if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);

  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_mapfs_count = 0;

  unlock_vnode(vp);
}


/*===========================================================================*
 *				vnode_clean_refs			     *
 *===========================================================================*/
void vnode_clean_refs(struct vnode *vp)
{
/* Tell the underlying FS to drop all references but one. */
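/* For example (figures are illustrative only): if v_fs_count has grown to
 * 300, the putnode request below asks the FS to drop 299 references at once,
 * leaving v_fs_count at one.
 */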

  if (vp == NULL) return;
  if (vp->v_fs_count <= 1) return;	/* Nothing to do */

  /* Drop all references except one */
  req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1);
  vp->v_fs_count = 1;
}