1 /* $NetBSD$ */
2 
3 /*
4  * File "udf_unix.c" is part of the UDFclient toolkit.
5  * File $Id: udf_unix.c,v 1.17 2016/04/25 21:01:40 reinoud Exp $
6  *
7  * Copyright (c) 2003, 2004, 2005, 2006, 2011
8  * 	Reinoud Zandijk <reinoud@netbsd.org>
9  * All rights reserved.
10  *
11  * The UDFclient toolkit is distributed under the Clarified Artistic Licence.
12  * A copy of the licence is included in the distribution as
13  * `LICENCE.clearified.artistic' and a copy of the licence can also be
 * requested at the GNU foundation's website.
15  *
16  * Visit the UDFclient toolkit homepage http://www.13thmonkey.org/udftoolkit/
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  */
30 
31 
32 /* XXX strip list to bare minimum XXX */
33 #include <stdio.h>
34 #include <fcntl.h>
35 #include <stdlib.h>
36 #include <errno.h>
37 #include <sys/types.h>
38 #include <sys/stat.h>
39 #include <unistd.h>
40 #include <assert.h>
41 #include <dirent.h>
42 #include <string.h>
43 #include <strings.h>
44 #include <limits.h>
45 #include <time.h>
46 
47 #include "uscsilib.h"
48 
49 
50 /* for locals */
51 #include "udf.h"
52 #include "udf_bswap.h"
53 #include "udf_discop.h"
54 #include "udf_unix.h"
55 #include "uio.h"
56 #include <pthread.h>
57 
58 
#ifndef MAX
/* NOTE(review): classic function-like min/max macros; each argument is
 * evaluated twice, so never pass side-effecting expressions. */
#	define MAX(a,b) ((a)>(b)?(a):(b))
#	define MIN(a,b) ((a)<(b)?(a):(b))
#endif



/* Debug trace wrapper: statements in `a' compile but never run; swap in the
 * `{ a; }' variant to enable tracing.
 * NOTE(review): the `if (0)' form can swallow a following `else' — confirm
 * no call site uses DEBUG() directly before an else branch. */
/* #define DEBUG(a) { a; } */
#define DEBUG(a) if (0) { a; }
68 
69 
70 /******************************************************************************************
71  *
72  * Bufcache emulation
73  *
74  ******************************************************************************************/
75 
/* shared bufcache structure; allocated exactly once by udf_unix_init() */
struct udf_bufcache *udf_bufcache = NULL;
78 
79 
udf_unix_init(void)80 int udf_unix_init(void) {
81 	if (udf_bufcache) {
82 		fprintf(stderr, "reinit unix_init?\n");
83 		return 0;
84 	}
85 
86 	udf_bufcache = calloc(1, sizeof(struct udf_bufcache));
87 	assert(udf_bufcache);
88 
89 	UDF_MUTEX_INIT(&udf_bufcache->bufcache_lock);
90 
91 	TAILQ_INIT(&udf_bufcache->lru_bufs_data);
92 	TAILQ_INIT(&udf_bufcache->lru_bufs_metadata);
93 
94 	pthread_cond_init(&udf_bufcache->purgethread_signal, NULL);
95 	pthread_mutex_init(&udf_bufcache->purgethread_lock, NULL);
96 
97 	pthread_cond_init(&udf_bufcache->processed_signal, NULL);
98 	pthread_mutex_init(&udf_bufcache->processed_lock, NULL);
99 
100 	return 0;
101 }
102 
103 
104 /* delete the buf entry */
udf_free_buf_entry(struct udf_buf * buf_entry)105 void udf_free_buf_entry(struct udf_buf *buf_entry) {
106 	assert(udf_bufcache);
107 
108 	buf_entry->b_vp    = NULL;	/* detach, i.e. recycle */
109 	buf_entry->b_flags = 0;		/* just in case */
110 
111 udf_bufcache->bcnt--;
112 	free(buf_entry->b_data);
113 	free(buf_entry);
114 }
115 
116 
117 /* XXX knowledge of LBSIZE, FILETYPE, INTERNAL NODE (?) ! XXX */
118 /* must be called with bufcache lock ! */
udf_get_buf_entry(struct udf_node * udf_node,struct udf_buf ** buf_entry_p)119 int udf_get_buf_entry(struct udf_node *udf_node, struct udf_buf **buf_entry_p) {
120 	struct udf_log_vol *log_vol;
121 	struct udf_buf     *buf_entry;
122 	uint32_t lb_size;
123 
124 	assert(udf_node);
125 	assert(udf_bufcache);
126 	assert(buf_entry_p);
127 
128 	log_vol = udf_node->udf_log_vol;
129 	lb_size = log_vol->lb_size;
130 
131 	*buf_entry_p = NULL;
132 	buf_entry    = NULL;
133 
134 	if (udf_node->udf_filetype != UDF_ICB_FILETYPE_RANDOMACCESS) {
135 		if (udf_bufcache->lru_len_metadata >= UDF_LRU_METADATA_MIN) {
136 			/* kick writeout of data; if past max wait for space to continue */
137 			UDF_MUTEX_UNLOCK(&udf_bufcache->bufcache_lock);
138 				udf_purgethread_kick("Data buffer surplus");
139 				while (udf_bufcache->lru_len_metadata >= UDF_LRU_METADATA_MAX) {
140 					udf_purgethread_kick("Metadata buffer surplus");
141 					/* wait for processed signal */
142 					pthread_mutex_lock(&udf_bufcache->processed_lock);
143 					pthread_cond_wait(&udf_bufcache->processed_signal, &udf_bufcache->processed_lock);
144 					pthread_mutex_unlock(&udf_bufcache->processed_lock);
145 				}
146 			UDF_MUTEX_LOCK(&udf_bufcache->bufcache_lock);
147 		}
148 	} else {
149 		if (udf_bufcache->lru_len_data >= UDF_LRU_DATA_MIN) {
150 			/* kick writeout of data; if past max wait for space to continue */
151 			UDF_MUTEX_UNLOCK(&udf_bufcache->bufcache_lock);
152 				udf_purgethread_kick("Data buffer surplus");
153 				while (udf_bufcache->lru_len_data >= UDF_LRU_DATA_MAX) {
154 					udf_purgethread_kick("Data buffer surplus");
155 					/* wait for processed signal */
156 					pthread_mutex_lock(&udf_bufcache->processed_lock);
157 					pthread_cond_wait(&udf_bufcache->processed_signal, &udf_bufcache->processed_lock);
158 					pthread_mutex_unlock(&udf_bufcache->processed_lock);
159 				}
160 			UDF_MUTEX_LOCK(&udf_bufcache->bufcache_lock);
161 		}
162 	}
163 
164 	/* create new buf_entry */
165 	buf_entry = calloc(1, sizeof(struct udf_buf));
166 	if (!buf_entry) return ENOMEM;
167 
168 	buf_entry->b_data = calloc(1, lb_size);
169 	if (!buf_entry->b_data) {
170 		*buf_entry_p = NULL;
171 		free(buf_entry);
172 		return ENOMEM;
173 	}
174 	*buf_entry_p = buf_entry;
175 
176 	/* fill in details */
177 	buf_entry->b_bufsize = lb_size;
178 	buf_entry->b_bcount  = 0;
179 	buf_entry->b_resid   = lb_size;
180 	buf_entry->b_lblk    = 0;
181 	buf_entry->b_flags   = B_INVAL;
182 	buf_entry->b_vp      = udf_node;	/* not just NULL ? */
183 
184 udf_bufcache->bcnt++;
185 	return 0;
186 }
187 
188 
189 /* really `out of the sky' hash formula */
udf_calc_bufhash(struct udf_node * udf_node,uint32_t b_lblk)190 uint32_t udf_calc_bufhash(struct udf_node *udf_node, uint32_t b_lblk) {
191 	return (udf_node->hashkey * 5 + b_lblk);
192 }
193 
194 
195 /* !!! needs to be called with bufcache and udf_node->buf_mutex lock !!! */
udf_mark_buf_needing_allocate(struct udf_node * udf_node,struct udf_buf * buf_entry)196 void udf_mark_buf_needing_allocate(struct udf_node *udf_node, struct udf_buf *buf_entry) {
197 	uint32_t lb_size;
198 
199 	assert(udf_node);
200 	/* assert(udf_node->buf_mutex.locked && udf_bufcache->bufcache_lock.locked); */
201 	lb_size = udf_node->udf_log_vol->lb_size;
202 
203 	/* if it isnt allready marked to eb allocated, allocate it and claim space */
204 	if (!(buf_entry->b_flags & B_NEEDALLOC)) {
205 		udf_node->udf_log_vol->await_alloc_space += lb_size;
206 		buf_entry->b_flags |= B_NEEDALLOC;
207 	}
208 }
209 
210 
211 /* !!! needs to be called with bufcache and udf_node->buf_mutex lock !!! */
udf_mark_buf_allocated(struct udf_node * udf_node,struct udf_buf * buf_entry)212 void udf_mark_buf_allocated(struct udf_node *udf_node, struct udf_buf *buf_entry) {
213 	uint32_t lb_size;
214 
215 	assert(udf_node);
216 	/* assert(udf_node->buf_mutex.locked && udf_bufcache->bufcache_lock.locked); */
217 	lb_size = udf_node->udf_log_vol->lb_size;
218 
219 	/* if it needed allocation, clear the flag and release the space */
220 	if (buf_entry->b_flags & B_NEEDALLOC) {
221 		udf_node->udf_log_vol->await_alloc_space -= lb_size;
222 		buf_entry->b_flags &= ~B_NEEDALLOC;
223 	}
224 }
225 
226 
227 /* !!! needs to be called with bufcache and udf_node->buf_mutex lock !!! */
udf_mark_buf_dirty(struct udf_node * udf_node,struct udf_buf * buf_entry)228 void udf_mark_buf_dirty(struct udf_node *udf_node, struct udf_buf *buf_entry) {
229 	assert(udf_node);
230 	assert(buf_entry);
231 	assert(udf_node->buf_mutex.locked);
232 	assert(udf_bufcache->bufcache_lock.locked);
233 
234 	if (buf_entry->b_flags & B_DIRTY)
235 		return;
236 
237 	if (udf_node->addr_type == UDF_ICB_INTERN_ALLOC) {
238 		udf_mark_buf_needing_allocate(udf_node, buf_entry);	/* signal it needs allocation */
239 	}
240 
241 	if (udf_node->udf_filetype != UDF_ICB_FILETYPE_RANDOMACCESS) {
242 		udf_bufcache->lru_len_dirty_metadata++;
243 	} else {
244 		udf_bufcache->lru_len_dirty_data++;
245 	}
246 	buf_entry->b_flags |= B_DIRTY;
247 	udf_node->v_numoutput++;
248 }
249 
250 
251 /* !!! needs to be called with bufcache and udf_node->buf_mutex lock !!! */
udf_mark_buf_clean(struct udf_node * udf_node,struct udf_buf * buf_entry)252 void udf_mark_buf_clean(struct udf_node *udf_node, struct udf_buf *buf_entry) {
253 	assert(udf_node);
254 	assert(buf_entry);
255 	assert(udf_node->buf_mutex.locked);
256 	assert(udf_bufcache->bufcache_lock.locked);
257 
258 	if ((buf_entry->b_flags & B_DIRTY) == 0)
259 		return;
260 
261 	if (udf_node->udf_filetype != UDF_ICB_FILETYPE_RANDOMACCESS) {
262 		udf_bufcache->lru_len_dirty_metadata--;
263 	} else {
264 		udf_bufcache->lru_len_dirty_data--;
265 	}
266 	buf_entry->b_flags &= ~B_DIRTY;
267 	assert(udf_node->v_numoutput >= 1);
268 	udf_node->v_numoutput--;
269 }
270 
271 
272 /* !!! needs to be called with bufcache and udf_node->buf_mutex lock !!! */
udf_attach_buf_to_node(struct udf_node * udf_node,struct udf_buf * buf_entry)273 int udf_attach_buf_to_node(struct udf_node *udf_node, struct udf_buf *buf_entry) {
274 	struct udf_buf_queue *lru_chain;
275 	struct udf_buf *buf, *lbuf;
276 	uint32_t hashkey, bucket;
277 
278 	assert(udf_node);
279 	assert(buf_entry);
280 	assert(udf_node->buf_mutex.locked && udf_bufcache->bufcache_lock.locked);
281 
282 	buf_entry->b_vp = udf_node;
283 
284 if (0) {
285 	/*
286 	 * Insert ordered in list. In KERNEL: vnode->v_dirtyblkhd is a list
287 	 * that can be reverse-sorted. Ordering not used yet thus commented
288 	 * out.
289 	 */
290 	lbuf = TAILQ_LAST(&udf_node->vn_bufs, udf_buf_queue);
291 	if (lbuf) {
292 		if (buf_entry->b_lblk > lbuf->b_lblk) {
293 			TAILQ_INSERT_TAIL(&udf_node->vn_bufs, buf_entry, b_vnbufs);
294 		} else {
295 			buf = TAILQ_FIRST(&udf_node->vn_bufs);
296 			while (buf->b_lblk < buf->b_lblk) {
297 				buf = TAILQ_NEXT(buf, b_vnbufs);
298 			}
299 			assert((buf->b_lblk != buf_entry->b_lblk) && (buf->b_vp == udf_node));
300 			TAILQ_INSERT_BEFORE(buf, buf_entry, b_vnbufs);
301 		}
302 	} else {
303 		TAILQ_INSERT_HEAD(&udf_node->vn_bufs, buf_entry, b_vnbufs);
304 	}
305 } else {
306 	TAILQ_INSERT_TAIL(&udf_node->vn_bufs, buf_entry, b_vnbufs);
307 }
308 
309 	/* fill buf into the bufcache */
310 	hashkey = udf_calc_bufhash(udf_node, buf_entry->b_lblk);
311 	bucket  = hashkey & UDF_BUFCACHE_HASHMASK;
312 
313 	DEBUG(
314 		struct udf_buf *buf;
315 
316 		/* checking for doubles */
317 		LIST_FOREACH(buf, &udf_bufcache->udf_bufs[bucket], b_hash) {
318 			if ((buf->b_vp == udf_node) && (buf->b_lblk == buf_entry->b_lblk)) {
319 				printf("DOUBLE hashnode in UDF_BUFS!?\n");
320 				exit(1);
321 			}
322 		}
323 	);
324 
325 	LIST_INSERT_HEAD(&udf_bufcache->udf_bufs[bucket], buf_entry, b_hash);
326 
327 	/* queue it in the lru chain */
328 	if (udf_node->udf_filetype != UDF_ICB_FILETYPE_RANDOMACCESS) {
329 		lru_chain = &udf_bufcache->lru_bufs_metadata;
330 		udf_bufcache->lru_len_metadata++;
331 	} else {
332 		lru_chain = &udf_bufcache->lru_bufs_data;
333 		udf_bufcache->lru_len_data++;
334 	}
335 	TAILQ_INSERT_TAIL(lru_chain, buf_entry, b_lru);
336 
337 	return 0;
338 }
339 
340 
341 /* kind of brelse() ? */
342 /* !!! needs to be called with bufcache and udf_node->buf_mutex lock !!! */
udf_detach_buf_from_node(struct udf_node * udf_node,struct udf_buf * buf_entry)343 int udf_detach_buf_from_node(struct udf_node *udf_node, struct udf_buf *buf_entry) {
344 	struct udf_buf_queue *lru_chain;
345 
346 	assert(udf_node);
347 	assert(buf_entry);
348 	assert(udf_node->buf_mutex.locked && udf_bufcache->bufcache_lock.locked);
349 
350 	/* remove from vnode admin */
351 	TAILQ_REMOVE(&udf_node->vn_bufs, buf_entry, b_vnbufs);
352 
353 	/* please don't forget this one */
354 	if (buf_entry->b_flags & B_DIRTY)
355 		udf_node->v_numoutput--;
356 
357 	/* remove from buffer cache */
358 	LIST_REMOVE(buf_entry, b_hash);
359 
360 	/* remove from lru lists */
361 	if (udf_node->udf_filetype != UDF_ICB_FILETYPE_RANDOMACCESS) {
362 		lru_chain = &udf_bufcache->lru_bufs_metadata;
363 		TAILQ_REMOVE(lru_chain, buf_entry, b_lru);
364 		udf_bufcache->lru_len_metadata--;
365 	} else {
366 		lru_chain = &udf_bufcache->lru_bufs_data;
367 		TAILQ_REMOVE(lru_chain, buf_entry, b_lru);
368 		udf_bufcache->lru_len_data--;
369 	}
370 
371 	return 0;
372 }
373 
374 
375 /* bufcache lock has to be held! */
udf_lookup_node_buf(struct udf_node * udf_node,uint32_t lblk,struct udf_buf ** buf_p)376 int udf_lookup_node_buf(struct udf_node *udf_node, uint32_t lblk, struct udf_buf **buf_p) {
377 #ifdef UDF_METADATA_LRU
378 	struct udf_buf_queue *lru_chain;
379 #endif
380 	struct udf_buf *buf;
381 	uint32_t hashkey, bucket;
382 
383 	assert(udf_node);
384 	assert(udf_bufcache->bufcache_lock.locked);
385 
386 	*buf_p = NULL;
387 
388 	hashkey = udf_calc_bufhash(udf_node, lblk);
389 	bucket  = hashkey & UDF_BUFCACHE_HASHMASK;
390 	LIST_FOREACH(buf, &udf_bufcache->udf_bufs[bucket], b_hash) {
391 		if ((buf->b_vp == udf_node) && (buf->b_lblk == lblk)) {
392 			*buf_p = buf;
393 
394 #ifdef UDF_METADATA_LRU
395 			lru_chain = &udf_bufcache->lru_bufs_data;
396 			if (udf_node->udf_filetype != UDF_ICB_FILETYPE_RANDOMACCESS) lru_chain = &udf_bufcache->lru_bufs_metadata;
397 
398 			TAILQ_REMOVE(lru_chain, buf, b_lru);
399 			TAILQ_INSERT_TAIL(lru_chain, buf, b_lru);
400 #endif
401 
402 			break; /* for each */
403 		}
404 	}
405 
406 	return 0;
407 }
408 
409 
/*
 * Bufcache purger thread main loop.
 *
 * Sleeps until kicked (or at most UDF_BUFCACHE_IDLE_SECS), then walks the
 * data and the metadata LRU chains: dirty buffers past the writeout
 * threshold are written back and, while a chain is over its MIN length,
 * least-recently-used clean buffers are detached and freed.  A dummy
 * `marker' buf is kept in the chain so the walk position survives the
 * temporary drops of the bufcache lock around writeout.  Terminates when
 * finish_purgethread is set (flushall forces one last full sweep).
 *
 * NOTE(review): processed_signal is signalled without processed_lock held
 * and the cond waits re-test their predicates outside the mutex; wakeups
 * can be missed/spurious — the timed wait papers over this.  Confirm
 * before relying on prompt wakeups.
 */
void *udf_purger(void *arg) {
	struct timespec wakeup;
	struct udf_buf *buf_entry, *marker;
	struct udf_node *udf_node;

	arg = arg;	/* parameter not used */
	/* dummy buf used purely as a position marker in the LRU chains */
	marker = calloc(1, sizeof(struct udf_buf));
	assert(marker);

	UDF_VERBOSE(printf("\tbufcache thread initialising\n"));
	while (1) {
		DEBUG(printf("UDF bufcache sync thread: waiting for lock\n"));
		/*
		 * If we are not asked to finish up our writing, block to
		 * wait for more data. Signal the reader to continue just in
		 * case it is still stuck.
		 */
		if (!udf_bufcache->finish_purgethread) {
			do {
				/* determine the time we want to wake up again */
				clock_gettime(CLOCK_REALTIME, &wakeup);
				wakeup.tv_sec += UDF_BUFCACHE_IDLE_SECS;

				/* ask for more requests */
				pthread_cond_signal(&udf_bufcache->processed_signal);
				pthread_mutex_lock(&udf_bufcache->purgethread_lock);
				pthread_cond_timedwait(&udf_bufcache->purgethread_signal, &udf_bufcache->purgethread_lock, &wakeup);
				pthread_mutex_unlock(&udf_bufcache->purgethread_lock);

				if (!udf_bufcache->purgethread_kicked) {
					/* UDF_VERBOSE_MAX(printf("\nUDF purger woke up due to timeout\n")); */

					/* woke by timeout: see if we would want to do something */
					if (udf_bufcache->flushall)	/* shouldn't happen */
						break;
					if (udf_bufcache->lru_len_data >= UDF_LRU_DATA_MIN)
						break;			/* we have something to do */
					if (udf_bufcache->lru_len_metadata >= UDF_LRU_METADATA_MIN)
						break;			/* we have something to do */
				} /* else : we have been explicitly asked to do something */
			} while (!udf_bufcache->purgethread_kicked && !udf_bufcache->finish_purgethread);
		}
		udf_bufcache->purgethread_kicked = 0;

		DEBUG(printf("UDF read/write thread: got activate\n"));

		UDF_MUTEX_LOCK(&udf_bufcache->bufcache_lock);
			/* writeout surplus of dirty buffers */
			/* PURGE surplus bufs if possible */
			if ((udf_bufcache->lru_len_data >= UDF_LRU_DATA_MIN) || udf_bufcache->flushall) {
				/* getting too many dirty data buffers: walk with marker */
				TAILQ_INSERT_HEAD(&udf_bufcache->lru_bufs_data, marker, b_lru);
				while ((buf_entry = TAILQ_NEXT(marker, b_lru))) {
					/* advance marker past the buf we are about to process */
					TAILQ_REMOVE(&udf_bufcache->lru_bufs_data, marker, b_lru);
					TAILQ_INSERT_AFTER(&udf_bufcache->lru_bufs_data, buf_entry, marker, b_lru);

					/* process buf_entry */
					if ((buf_entry->b_flags & B_DIRTY) != 0) {
						if ((udf_bufcache->lru_len_dirty_data >= UDF_READWRITE_LINE_LENGTH*2) || udf_bufcache->flushall) {
							/* lock dropped for the (slow) writeout; marker keeps our place */
							UDF_MUTEX_UNLOCK(&udf_bufcache->bufcache_lock);
								/* signal there is time/space ahead and write out buffer */
								pthread_cond_signal(&udf_bufcache->processed_signal);
								udf_writeout_file_buffer(buf_entry->b_vp, "dirty data buf", UDF_C_USERDATA, buf_entry);
DEBUG(printf("."); fflush(stdout));
							UDF_MUTEX_LOCK(&udf_bufcache->bufcache_lock);
						}
					}

					/* if there are too many, drop least used */
					if (udf_bufcache->lru_len_data > UDF_LRU_DATA_MIN) {
						TAILQ_FOREACH(buf_entry, &udf_bufcache->lru_bufs_data, b_lru) {
							/* process buf_entry; only clean bufs are dropped */
							if ((buf_entry != marker) && ((buf_entry->b_flags & B_DIRTY) == 0)) {
								/* lock node bufs (locking protocol) */
								udf_node = buf_entry->b_vp;
								if (udf_node) {
									UDF_MUTEX_LOCK(&udf_node->buf_mutex);
										udf_detach_buf_from_node(udf_node, buf_entry);
									UDF_MUTEX_UNLOCK(&udf_node->buf_mutex);
								} else {
									printf("\n\nWARNING: got a NULL udf_node freeing dataspace\n\n");
								}
								udf_free_buf_entry(buf_entry);

								/* signal there is time/space ahead */
								UDF_MUTEX_UNLOCK(&udf_bufcache->bufcache_lock);
									pthread_cond_signal(&udf_bufcache->processed_signal);
								UDF_MUTEX_LOCK(&udf_bufcache->bufcache_lock);

								break; /* mandatory leaving FOREACH: buf_entry was freed */
							}
							if (udf_bufcache->lru_len_data < UDF_LRU_DATA_MIN)
								break;	/* foreach */
						}
					}
				}
				TAILQ_REMOVE(&udf_bufcache->lru_bufs_data, marker, b_lru);
			}
			if ((udf_bufcache->lru_len_metadata >= UDF_LRU_METADATA_MIN) || udf_bufcache->flushall) {
				/* getting too many dirty metadata buffers: same marker walk */
				TAILQ_INSERT_HEAD(&udf_bufcache->lru_bufs_metadata, marker, b_lru);
				while ((buf_entry = TAILQ_NEXT(marker, b_lru))) {
					/* advance marker */
					TAILQ_REMOVE(&udf_bufcache->lru_bufs_metadata, marker, b_lru);
					TAILQ_INSERT_AFTER(&udf_bufcache->lru_bufs_metadata, buf_entry, marker, b_lru);

					/* process buf_entry */
					if ((buf_entry->b_flags & B_DIRTY) != 0) {
						if ((udf_bufcache->lru_len_dirty_metadata >= UDF_READWRITE_LINE_LENGTH*2) || udf_bufcache->flushall) {
							UDF_MUTEX_UNLOCK(&udf_bufcache->bufcache_lock);
								/* signal there is time/space ahead and writeout buffer */
								pthread_cond_signal(&udf_bufcache->processed_signal);
								udf_writeout_file_buffer(buf_entry->b_vp, "dirty metadata buf", UDF_C_FIDS, buf_entry);
DEBUG(printf("+"); fflush(stdout));
							UDF_MUTEX_LOCK(&udf_bufcache->bufcache_lock);
						}
					}

					/* if there are too many, drop least used */
					if (udf_bufcache->lru_len_metadata > UDF_LRU_METADATA_MIN) {
						TAILQ_FOREACH(buf_entry, &udf_bufcache->lru_bufs_metadata, b_lru) {
							/* process buf_entry; parens look odd but `=='
							 * binds tighter than `&&', so this tests the
							 * DIRTY bit being clear, as in the data walk */
							if ((buf_entry != marker) && ((buf_entry->b_flags & B_DIRTY)) == 0) {
								/* lock node bufs (locking protocol); don't drop metadata from `held' nodes */
								udf_node = buf_entry->b_vp;
								if (udf_node && ((!udf_node->hold) || udf_bufcache->flushall)) {
									UDF_MUTEX_LOCK(&udf_node->buf_mutex);
										udf_detach_buf_from_node(udf_node, buf_entry);
									UDF_MUTEX_UNLOCK(&udf_node->buf_mutex);
									udf_free_buf_entry(buf_entry);

									/* signal there is time/space ahead */
									UDF_MUTEX_UNLOCK(&udf_bufcache->bufcache_lock);
										pthread_cond_signal(&udf_bufcache->processed_signal);
									UDF_MUTEX_LOCK(&udf_bufcache->bufcache_lock);

									break; /* mandatory leaving FOREACH */
								} else {
									if (!udf_node) {
										printf("\n\nWARNING: got a NULL udf_node freeing METAspace\n\n");
									}
#ifdef UDF_METADATA_LRU
									/* NOTE(review): moving buf_entry to the tail
									 * while TAILQ_FOREACH iterates on it makes the
									 * loop continue from the tail — confirm this
									 * early termination is intended */
									TAILQ_REMOVE(&udf_bufcache->lru_bufs_metadata, buf_entry, b_lru);
									TAILQ_INSERT_TAIL(&udf_bufcache->lru_bufs_metadata, buf_entry, b_lru);
#endif
								}
							}
						}
					}
				}
				TAILQ_REMOVE(&udf_bufcache->lru_bufs_metadata, marker, b_lru);
			}
			/* can only be used once */
			udf_bufcache->flushall = 0;

		UDF_MUTEX_UNLOCK(&udf_bufcache->bufcache_lock);
		/* PURGE nodes?	(or only on request?)	*/

		/* if asked to quit break out of the loop */
		if (udf_bufcache->finish_purgethread) break;
	}

	UDF_VERBOSE(printf("\tbufcache thread joining\n"));
	pthread_exit(0);	/* join */

	/* not reached */
	return NULL;
}
579 
580 
udf_start_unix_thread(void)581 int udf_start_unix_thread(void) {
582 	/*
583 	 * start up bufcache purger thread and crudely kick it into
584 	 * existence.
585 	 */
586 
587 	if (udf_bufcache->thread_active) {
588 		fprintf(stderr,"\tlogvol bufcache thread asked to start again; ignoring\n");
589 		return 0;
590 	}
591 
592 	DEBUG(printf("\tstarting logvol bufcache thread\n"));
593 
594 	udf_bufcache->thread_active = 1;
595 	pthread_create(&udf_bufcache->purgethread_id, NULL, udf_purger, NULL);
596 	sleep(1);
597 
598 	DEBUG(printf("\n\n"));
599 	return 0;
600 }
601 
602 
udf_stop_unix_thread(void)603 int udf_stop_unix_thread(void) {
604 	/* stop all associated threads */
605 	UDF_VERBOSE(printf("\tstopping bufcache thread\n"));
606 
607 	if (udf_bufcache->thread_active) {
608 		udf_bufcache->purgethread_kicked = 1;
609 		udf_bufcache->finish_purgethread = 1;
610 		pthread_cond_signal(&udf_bufcache->purgethread_signal);
611 		pthread_join(udf_bufcache->purgethread_id, NULL);		/* wait for join */
612 	}
613 
614 	udf_bufcache->thread_active = 0;
615 	return 0;
616 }
617 
618 
udf_purgethread_kick(char * why)619 int udf_purgethread_kick(char *why) {
620 	/*
621 	 * Kick the cache purger into existence in case its not active and wait
622 	 * for it to signal there is space left.
623 	 */
624 
625 	DEBUG(printf("\npurgethread kick! because of %s\n", why));
626 	udf_bufcache->purgethread_kicked = 1;
627 	pthread_cond_signal(&udf_bufcache->purgethread_signal);
628 
629 	return 0;
630 }
631 
632 /* end of udf_unix.c */
633 
634