xref: /freebsd/usr.sbin/rpc.lockd/lockd_lock.c (revision 75e40e46)
1 /*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
2 /*	$FreeBSD$ */
3 
4 /*
5  * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
6  * Copyright (c) 2000 Manuel Bouyer.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  */
37 
38 #define LOCKD_DEBUG
39 
40 #include <stdio.h>
41 #ifdef LOCKD_DEBUG
42 #include <stdarg.h>
43 #endif
44 #include <stdlib.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <syslog.h>
48 #include <errno.h>
49 #include <string.h>
50 #include <signal.h>
51 #include <rpc/rpc.h>
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <sys/socket.h>
55 #include <sys/param.h>
56 #include <sys/mount.h>
57 #include <sys/wait.h>
58 #include <rpcsvc/sm_inter.h>
59 #include <rpcsvc/nlm_prot.h>
60 #include "lockd_lock.h"
61 #include "lockd.h"
62 
63 #define MAXOBJECTSIZE 64
64 #define MAXBUFFERSIZE 1024
65 
66 /*
67  * SM_MAXSTRLEN is usually 1024.  This means that lock requests and
68  * host name monitoring entries are *MUCH* larger than they should be.
69  */
70 
71 /*
72  * A set of utilities for managing file locking
73  *
74  * XXX: All locks are in a linked list, a better structure should be used
75  * to improve search/access efficiency.
76  */
77 
78 /* struct describing a lock */
79 struct file_lock {
80 	LIST_ENTRY(file_lock) nfslocklist;
81 	fhandle_t filehandle; /* NFS filehandle */
82 	struct sockaddr *addr;
83 	struct nlm4_holder client; /* lock holder */
84 	/* XXX: client_cookie used *only* in send_granted */
85 	netobj client_cookie; /* cookie sent by the client */
86 	char client_name[SM_MAXSTRLEN];
87 	int nsm_status; /* status from the remote lock manager */
88 	int status; /* lock status, see below */
89 	int flags; /* lock flags, see lockd_lock.h */
90 	int blocking; /* blocking lock or not */
91 	pid_t locker; /* pid of the child process trying to get the lock */
92 	int fd;	/* file descriptor for this lock */
93 };
94 
95 LIST_HEAD(nfslocklist_head, file_lock);
96 struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);
97 
98 LIST_HEAD(blockedlocklist_head, file_lock);
99 struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);
100 
101 /* lock status */
102 #define LKST_LOCKED	1 /* lock is locked */
103 /* XXX: Is this flag file specific or lock specific? */
104 #define LKST_WAITING	2 /* file is already locked by another host */
105 #define LKST_PROCESSING	3 /* child is trying to acquire the lock */
106 #define LKST_DYING	4 /* must die when we get news from the child */
107 
108 /* struct describing a monitored host */
109 struct host {
110 	LIST_ENTRY(host) hostlst;
111 	char name[SM_MAXSTRLEN];
112 	int refcnt;
113 };
114 /* list of hosts we monitor */
115 LIST_HEAD(hostlst_head, host);
116 struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);
117 
118 /*
119  * File monitoring handlers
120  * XXX: These might be able to be removed when kevent support
121  * is placed into the hardware lock/unlock routines.  (ie.
122  * let the kernel do all the file monitoring)
123  */
124 
125 /* Struct describing a monitored file */
126 struct monfile {
127 	LIST_ENTRY(monfile) monfilelist;
128 	fhandle_t filehandle; /* Local access filehandle */
129 	int fd; /* file descriptor: remains open until unlock! */
130 	int refcount;
131 	int exclusive;
132 };
133 
134 /* List of files we monitor */
135 LIST_HEAD(monfilelist_head, monfile);
136 struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);
137 
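/*
 * Nonzero makes every debuglog() call sleep that many seconds.  There is
 * no setter in this file, so it is presumably tweaked by hand (or from a
 * debugger) when tracing interleaved daemon output.
 */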
138 static int debugdelay = 0;
139 
140 enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
141 		      NFS_DENIED, NFS_DENIED_NOLOCK,
142 		      NFS_RESERR };
143 
144 enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
145 		     HW_DENIED, HW_DENIED_NOLOCK,
146 		     HW_STALEFH, HW_READONLY, HW_RESERR };
147 
148 enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
149 			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
150 			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR};
151 
152 enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
153 enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
154 /* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
155 enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};
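/*
 * As used below: SPL_LOCK1 and SPL_LOCK2 are OR-able bits (both are set
 * when an unlock punches a hole in the middle of an existing lock),
 * while SPL_DISJOINT, SPL_CONTAINED and SPL_RESERR stand alone -- hence
 * the overloading complaint above.
 */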
156 
157 enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);
158 
159 void send_granted(struct file_lock *fl, int opcode);
160 void siglock(void);
161 void sigunlock(void);
162 void monitor_lock_host(const char *hostname);
163 void unmonitor_lock_host(char *hostname);
164 
165 void	copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
166     const bool_t exclusive, struct nlm4_holder *dest);
167 struct file_lock *	allocate_file_lock(const netobj *lockowner,
168     const netobj *matchcookie);
169 void	deallocate_file_lock(struct file_lock *fl);
170 void	fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
171     struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
172     const u_int64_t offset, const u_int64_t len, const char *caller_name,
173     const int state, const int status, const int flags, const int blocking);
174 int	regions_overlap(const u_int64_t start1, const u_int64_t len1,
175     const u_int64_t start2, const u_int64_t len2);
176 enum split_status  region_compare(const u_int64_t starte, const u_int64_t lene,
177     const u_int64_t startu, const u_int64_t lenu,
178     u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
179 int	same_netobj(const netobj *n0, const netobj *n1);
180 int	same_filelock_identity(const struct file_lock *fl0,
181     const struct file_lock *fl1);
182 
183 void debuglog(char const *fmt, ...);
184 void dump_static_object(const unsigned char *object, const int size_object,
185                         unsigned char *hbuff, const int size_hbuff,
186                         unsigned char *cbuff, const int size_cbuff);
187 void dump_netobj(const struct netobj *nobj);
188 void dump_filelock(const struct file_lock *fl);
189 struct file_lock *	get_lock_matching_unlock(const struct file_lock *fl);
190 enum nfslock_status	test_nfslock(const struct file_lock *fl,
191     struct file_lock **conflicting_fl);
192 enum nfslock_status	lock_nfslock(struct file_lock *fl);
193 enum nfslock_status	delete_nfslock(struct file_lock *fl);
194 enum nfslock_status	unlock_nfslock(const struct file_lock *fl,
195     struct file_lock **released_lock, struct file_lock **left_lock,
196     struct file_lock **right_lock);
197 enum hwlock_status lock_hwlock(struct file_lock *fl);
198 enum split_status split_nfslock(const struct file_lock *exist_lock,
199     const struct file_lock *unlock_lock, struct file_lock **left_lock,
200     struct file_lock **right_lock);
201 void	add_blockingfilelock(struct file_lock *fl);
202 enum hwlock_status	unlock_hwlock(const struct file_lock *fl);
203 enum hwlock_status	test_hwlock(const struct file_lock *fl,
204     struct file_lock **conflicting_fl);
205 void	remove_blockingfilelock(struct file_lock *fl);
206 void	clear_blockingfilelock(const char *hostname);
207 void	retry_blockingfilelocklist(void);
208 enum partialfilelock_status	unlock_partialfilelock(
209     const struct file_lock *fl);
210 void	clear_partialfilelock(const char *hostname);
211 enum partialfilelock_status	test_partialfilelock(
212     const struct file_lock *fl, struct file_lock **conflicting_fl);
213 enum nlm_stats	do_test(struct file_lock *fl,
214     struct file_lock **conflicting_fl);
215 enum nlm_stats	do_unlock(struct file_lock *fl);
216 enum nlm_stats	do_lock(struct file_lock *fl);
217 void	do_clear(const char *hostname);
218 
219 
220 void
221 debuglog(char const *fmt, ...)
222 {
223 	va_list ap;
224 
225 	if (debug_level < 1) {
226 		return;
227 	}
228 
229 	sleep(debugdelay);
230 
231 	va_start(ap, fmt);
232 	vsyslog(LOG_DEBUG, fmt, ap);
233 	va_end(ap);
234 }
235 
236 void
237 dump_static_object(const unsigned char *object, const int size_object,
238     unsigned char *hbuff, const int size_hbuff,
239     unsigned char *cbuff, const int size_cbuff)
244 {
245 	int i, objectsize;
246 
247 	if (debug_level < 2) {
248 		return;
249 	}
250 
251 	objectsize = size_object;
252 
253 	if (objectsize == 0) {
254 		debuglog("object is size 0\n");
255 	} else {
256 		if (objectsize > MAXOBJECTSIZE) {
257 			debuglog("Object of size %d being clamped"
258 			    " to size %d\n", objectsize, MAXOBJECTSIZE);
259 			objectsize = MAXOBJECTSIZE;
260 		}
261 
262 		if (hbuff != NULL) {
263 			if (size_hbuff < objectsize*2+1) {
264 				debuglog("Hbuff not large enough."
265 				    "  Increase size\n");
266 			} else {
267 				for(i=0;i<objectsize;i++) {
268 					sprintf(hbuff+i*2,"%02x",*(object+i));
269 				}
270 				*(hbuff+i*2) = '\0';
271 			}
272 		}
273 
274 		if (cbuff != NULL) {
275 			if (size_cbuff < objectsize+1) {
276 				debuglog("Cbuff not large enough."
277 				    "  Increase size\n");
278 			} else {
279 				for(i=0;i<objectsize;i++) {
280 					if (*(object+i) >= 32 && *(object+i) <= 127) {
281 						*(cbuff+i) = *(object+i);
282 					} else {
283 						*(cbuff+i) = '.';
284 					}
285 				}
286 				*(cbuff+i) = '\0';
287 			}
288 		}
289 	}
290 }
291 
292 void
293 dump_netobj(const struct netobj *nobj)
294 {
295 	char hbuff[MAXBUFFERSIZE*2];
296 	char cbuff[MAXBUFFERSIZE];
297 
298 	if (debug_level < 2) {
299 		return;
300 	}
301 
302 	if (nobj == NULL) {
303 		debuglog("Null netobj pointer\n");
304 	}
305 	else if (nobj->n_len == 0) {
306 		debuglog("Size zero netobj\n");
307 	} else {
308 		dump_static_object(nobj->n_bytes, nobj->n_len,
309 		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
310 		debuglog("netobj: len: %d  data: %s :::  %s\n",
311 		    nobj->n_len, hbuff, cbuff);
312 	}
313 }
314 
315 /* #define DUMP_FILELOCK_VERBOSE */
316 void
317 dump_filelock(const struct file_lock *fl)
318 {
319 #ifdef DUMP_FILELOCK_VERBOSE
320 	char hbuff[MAXBUFFERSIZE*2];
321 	char cbuff[MAXBUFFERSIZE];
322 #endif
323 
324 	if (debug_level < 2) {
325 		return;
326 	}
327 
328 	if (fl != NULL) {
329 		debuglog("Dumping file lock structure @ %p\n", fl);
330 
331 #ifdef DUMP_FILELOCK_VERBOSE
332 		dump_static_object((unsigned char *)&fl->filehandle,
333 		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
334 		    cbuff, sizeof(cbuff));
335 		debuglog("Filehandle: %8s  :::  %8s\n", hbuff, cbuff);
336 #endif
337 
338 		debuglog("Dumping nlm4_holder:\n"
339 		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
340 		    fl->client.exclusive, fl->client.svid,
341 		    fl->client.l_offset, fl->client.l_len);
342 
343 #ifdef DUMP_FILELOCK_VERBOSE
344 		debuglog("Dumping client identity:\n");
345 		dump_netobj(&fl->client.oh);
346 
347 		debuglog("Dumping client cookie:\n");
348 		dump_netobj(&fl->client_cookie);
349 
350 		debuglog("nsm: %d  status: %d  flags: %d  locker: %d"
351 		    "  fd:  %d\n", fl->nsm_status, fl->status,
352 		    fl->flags, fl->locker, fl->fd);
353 #endif
354 	} else {
355 		debuglog("NULL file lock structure\n");
356 	}
357 }
358 
359 void
360 copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
361     const bool_t exclusive, struct nlm4_holder *dest)
364 {
365 
366 	dest->exclusive = exclusive;
367 	dest->oh.n_len = src->oh.n_len;
368 	dest->oh.n_bytes = src->oh.n_bytes;
369 	dest->svid = src->svid;
370 	dest->l_offset = src->l_offset;
371 	dest->l_len = src->l_len;
372 }
373 
374 
375 /*
376  * allocate_file_lock: Create a lock with the given parameters
377  */
378 
379 struct file_lock *
380 allocate_file_lock(const netobj *lockowner, const netobj *matchcookie)
381 {
382 	struct file_lock *newfl;
383 
384 	newfl = malloc(sizeof(struct file_lock));
385 	if (newfl == NULL) {
386 		return NULL;
387 	}
388 	bzero(newfl, sizeof(*newfl));
389 
390 	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
391 	if (newfl->client.oh.n_bytes == NULL) {
392 		free(newfl);
393 		return NULL;
394 	}
395 	newfl->client.oh.n_len = lockowner->n_len;
396 	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);
397 
398 	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
399 	if (newfl->client_cookie.n_bytes == NULL) {
400 		free(newfl->client.oh.n_bytes);
401 		free(newfl);
402 		return NULL;
403 	}
404 	newfl->client_cookie.n_len = matchcookie->n_len;
405 	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);
406 
407 	return newfl;
408 }
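
/*
 * Typical lifecycle (see getlock() near the bottom of this file):
 * allocate_file_lock() copies the owner and cookie netobjs,
 * fill_file_lock() populates the remaining fields, and
 * deallocate_file_lock() releases all three allocations.
 */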
409 
410 /*
411  * fill_file_lock: Force creation of a valid file lock
412  */
413 void
414 fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
415     struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
416     const u_int64_t offset, const u_int64_t len, const char *caller_name,
417     const int state, const int status, const int flags, const int blocking)
418 {
419 	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));
420 	fl->addr = addr;
421 
422 	fl->client.exclusive = exclusive;
423 	fl->client.svid = svid;
424 	fl->client.l_offset = offset;
425 	fl->client.l_len = len;
426 
427 	strncpy(fl->client_name, caller_name, SM_MAXSTRLEN - 1);
428 	fl->client_name[SM_MAXSTRLEN - 1] = '\0';
429 	fl->nsm_status = state;
430 	fl->status = status;
431 	fl->flags = flags;
432 	fl->blocking = blocking;
433 }
434 
435 /*
436  * deallocate_file_lock: Free all storage associated with a file lock
437  */
438 void
439 deallocate_file_lock(struct file_lock *fl)
440 {
441 	free(fl->client.oh.n_bytes);
442 	free(fl->client_cookie.n_bytes);
443 	free(fl);
444 }
445 
446 /*
447  * regions_overlap(): This function examines the two provided regions for
448  * overlap.
449  */
450 int
451 regions_overlap(const u_int64_t start1, const u_int64_t len1,
452     const u_int64_t start2, const u_int64_t len2)
453 {
454 	u_int64_t d1,d2,d3,d4;
455 	enum split_status result;
456 
457 	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
458 		 start1, len1, start2, len2);
459 
460 	result = region_compare(start1, len1, start2, len2,
461 	    &d1, &d2, &d3, &d4);
462 
463 	debuglog("Exiting region overlap with val: %d\n",result);
464 
465 	if (result == SPL_DISJOINT) {
466 		return 0;
467 	} else {
468 		return 1;
469 	}
472 }
473 
474 /*
475  * region_compare(): Examine lock regions and split appropriately
476  *
477  * XXX: Fix 64 bit overflow problems
478  * XXX: Check to make sure I got *ALL* the cases.
479  * XXX: This DESPERATELY needs a regression test.
480  */
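/*
 * A concrete case, in lieu of the regression test asked for above:
 * unlocking startu=40, lenu=20 out of an established lock with
 * starte=0, lene=100 hits both edge tests' "inside" arms, so the call
 * returns SPL_LOCK1|SPL_LOCK2 with *start1=0, *len1=40 (the bytes left
 * of the hole) and *start2=60, *len2=40 (the bytes right of it).  An
 * unlock spanning the whole region (startu=0, lenu=100) returns
 * SPL_CONTAINED instead.
 */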
481 enum split_status
482 region_compare(const u_int64_t starte, const u_int64_t lene,
483     const u_int64_t startu, const u_int64_t lenu,
484     u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2)
486 {
487 	/*
488 	 * Please pay attention to the sequential exclusions
489 	 * of the if statements!!!
490 	 */
491 	enum LFLAGS lflags;
492 	enum RFLAGS rflags;
493 	enum split_status retval;
494 
495 	retval = SPL_DISJOINT;
496 
497 	if (lene == 0 && lenu == 0) {
498 		/* Examine left edge of locker */
499 		if (startu < starte) {
500 			lflags = LEDGE_LEFT;
501 		} else if (startu == starte) {
502 			lflags = LEDGE_LBOUNDARY;
503 		} else {
504 			lflags = LEDGE_INSIDE;
505 		}
506 
507 		rflags = REDGE_RBOUNDARY; /* Both are infinite */
508 
509 		if (lflags == LEDGE_INSIDE) {
510 			*start1 = starte;
511 			*len1 = startu - starte;
512 		}
513 
514 		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
515 			retval = SPL_CONTAINED;
516 		} else {
517 			retval = SPL_LOCK1;
518 		}
519 	} else if (lene == 0 && lenu != 0) {
520 		/* Established lock is infinite */
521 		/* Examine left edge of unlocker */
522 		if (startu < starte) {
523 			lflags = LEDGE_LEFT;
524 		} else if (startu == starte) {
525 			lflags = LEDGE_LBOUNDARY;
526 		} else if (startu > starte) {
527 			lflags = LEDGE_INSIDE;
528 		}
529 
530 		/* Examine right edge of unlocker */
531 		if (startu + lenu < starte) {
532 			/* Right edge of unlocker left of established lock */
533 			rflags = REDGE_LEFT;
534 			return SPL_DISJOINT;
535 		} else if (startu + lenu == starte) {
536 			/* Right edge of unlocker on start of established lock */
537 			rflags = REDGE_LBOUNDARY;
538 			return SPL_DISJOINT;
539 		} else { /* Infinity is right of finity */
540 			/* Right edge of unlocker inside established lock */
541 			rflags = REDGE_INSIDE;
542 		}
543 
544 		if (lflags == LEDGE_INSIDE) {
545 			*start1 = starte;
546 			*len1 = startu - starte;
547 			retval |= SPL_LOCK1;
548 		}
549 
550 		if (rflags == REDGE_INSIDE) {
551 			/* Create right lock */
552 			*start2 = startu+lenu;
553 			*len2 = 0;
554 			retval |= SPL_LOCK2;
555 		}
556 	} else if (lene != 0 && lenu == 0) {
557 		/* Unlocker is infinite */
558 		/* Examine left edge of unlocker */
559 		if (startu < starte) {
560 			lflags = LEDGE_LEFT;
561 			retval = SPL_CONTAINED;
562 			return retval;
563 		} else if (startu == starte) {
564 			lflags = LEDGE_LBOUNDARY;
565 			retval = SPL_CONTAINED;
566 			return retval;
567 		} else if ((startu > starte) && (startu < starte + lene - 1)) {
568 			lflags = LEDGE_INSIDE;
569 		} else if (startu == starte + lene - 1) {
570 			lflags = LEDGE_RBOUNDARY;
571 		} else { /* startu > starte + lene -1 */
572 			lflags = LEDGE_RIGHT;
573 			return SPL_DISJOINT;
574 		}
575 
576 		rflags = REDGE_RIGHT; /* Infinity is right of finity */
577 
578 		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
579 			*start1 = starte;
580 			*len1 = startu - starte;
581 			retval |= SPL_LOCK1;
582 			return retval;
583 		}
584 
585 	} else {
586 		/* Both locks are finite */
587 
588 		/* Examine left edge of unlocker */
589 		if (startu < starte) {
590 			lflags = LEDGE_LEFT;
591 		} else if (startu == starte) {
592 			lflags = LEDGE_LBOUNDARY;
593 		} else if ((startu > starte) && (startu < starte + lene - 1)) {
594 			lflags = LEDGE_INSIDE;
595 		} else if (startu == starte + lene - 1) {
596 			lflags = LEDGE_RBOUNDARY;
597 		} else { /* startu > starte + lene -1 */
598 			lflags = LEDGE_RIGHT;
599 			return SPL_DISJOINT;
600 		}
601 
602 		/* Examine right edge of unlocker */
603 		if (startu + lenu < starte) {
604 			/* Right edge of unlocker left of established lock */
605 			rflags = REDGE_LEFT;
606 			return SPL_DISJOINT;
607 		} else if (startu + lenu == starte) {
608 			/* Right edge of unlocker on start of established lock */
609 			rflags = REDGE_LBOUNDARY;
610 			return SPL_DISJOINT;
611 		} else if (startu + lenu < starte + lene) {
612 			/* Right edge of unlocker inside established lock */
613 			rflags = REDGE_INSIDE;
614 		} else if (startu + lenu == starte + lene) {
615 			/* Right edge of unlocker on right edge of established lock */
616 			rflags = REDGE_RBOUNDARY;
617 		} else { /* startu + lenu > starte + lene */
618 			/* Right edge of unlocker is right of established lock */
619 			rflags = REDGE_RIGHT;
620 		}
621 
622 		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
623 			/* Create left lock */
624 			*start1 = starte;
625 			*len1 = (startu - starte);
626 			retval |= SPL_LOCK1;
627 		}
628 
629 		if (rflags == REDGE_INSIDE) {
630 			/* Create right lock */
631 			*start2 = startu+lenu;
632 			*len2 = starte+lene-(startu+lenu);
633 			retval |= SPL_LOCK2;
634 		}
635 
636 		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
637 		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
638 			retval = SPL_CONTAINED;
639 		}
640 	}
641 
642 	return retval;
643 }
644 
645 /*
646  * same_netobj: Compares the appropriate bits of a netobj for identity
647  */
648 int
649 same_netobj(const netobj *n0, const netobj *n1)
650 {
651 	int retval;
652 
653 	retval = 0;
654 
655 	debuglog("Entering netobj identity check\n");
656 
657 	if (n0->n_len == n1->n_len) {
658 		debuglog("Preliminary length check passed\n");
659 		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
660 		debuglog("netobj %smatch\n", retval ? "" : "mis");
661 	}
662 
663 	return (retval);
664 }
665 
666 /*
667  * same_filelock_identity: Compares the appropriate bits of a file_lock
668  */
669 int
670 same_filelock_identity(const struct file_lock *fl0,
671     const struct file_lock *fl1)
672 {
673 	int retval;
674 
675 	retval = 0;
676 
677 	debuglog("Checking filelock identity\n");
678 
679 	/*
680 	 * Check process ids and host information.
681 	 */
682 	retval = (fl0->client.svid == fl1->client.svid &&
683 	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));
684 
685 	debuglog("Exiting checking filelock identity: retval: %d\n",retval);
686 
687 	return (retval);
688 }
689 
690 /*
691  * Below here are routines associated with manipulating the NFS
692  * lock list.
693  */
694 
695 /*
696  * get_lock_matching_unlock: Return a lock which matches the given unlock lock
697  *                           or NULL otherwise
698  * XXX: It is a shame that this duplicates so much code from test_nfslock.
699  */
700 struct file_lock *
701 get_lock_matching_unlock(const struct file_lock *fl)
702 {
703 	struct file_lock *ifl; /* Iterator */
704 
705 	debuglog("Entering lock_matching_unlock\n");
706 	debuglog("********Dump of fl*****************\n");
707 	dump_filelock(fl);
708 
709 	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
710 		debuglog("Pointer to file lock: %p\n",ifl);
711 
712 		debuglog("****Dump of ifl****\n");
713 		dump_filelock(ifl);
714 		debuglog("*******************\n");
715 
716 		/*
717 		 * XXX: It is conceivable that someone could use the NLM RPC
718 		 * system to directly access filehandles.  This may be a
719 		 * security hazard as the filehandle code may bypass normal
720 		 * file access controls
721 		 */
722 		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
723 			continue;
724 
725 		debuglog("matching_unlock: Filehandles match, "
726 		    "checking regions\n");
727 
728 		/* Filehandles match, check for region overlap */
729 		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
730 			ifl->client.l_offset, ifl->client.l_len))
731 			continue;
732 
733 		debuglog("matching_unlock: Region overlap"
734 		    " found %llu : %llu -- %llu : %llu\n",
735 		    fl->client.l_offset,fl->client.l_len,
736 		    ifl->client.l_offset,ifl->client.l_len);
737 
738 		/* Regions overlap, check the identity */
739 		if (!same_filelock_identity(fl,ifl))
740 			continue;
741 
742 		debuglog("matching_unlock: Duplicate lock id.  Granting\n");
743 		return (ifl);
744 	}
745 
746 	debuglog("Exiting lock_matching_unlock\n");
747 
748 	return (NULL);
749 }
750 
751 /*
752  * test_nfslock: check for NFS lock in lock list
753  *
754  * This routine makes the following assumptions:
755  *    1) Nothing will adjust the lock list during a lookup
756  *
757  * This routine has an interesting quirk which bit me hard.
758  * The conflicting_fl is the pointer to the conflicting lock.
759  * However, to modify the "*pointer* to the conflicting lock" rather
760  * than the "conflicting lock itself" one must pass in a "pointer to
761  * the pointer of the conflicting lock".  Gross.
762  */
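/*
 * Illustrative use (hypothetical caller; do_test below follows this
 * pattern):
 *
 *	struct file_lock *conflict = NULL;
 *	if (test_nfslock(fl, &conflict) == NFS_DENIED)
 *		dump_filelock(conflict);
 *
 * On NFS_DENIED (and NFS_GRANTED_DUPLICATE) conflict points at the
 * matching list entry; on NFS_GRANTED it is left NULL.
 */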
763 
764 enum nfslock_status
765 test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
766 {
767 	struct file_lock *ifl; /* Iterator */
768 	enum nfslock_status retval;
769 
770 	debuglog("Entering test_nfslock\n");
771 
772 	retval = NFS_GRANTED;
773 	(*conflicting_fl) = NULL;
774 
775 	debuglog("Entering lock search loop\n");
776 
777 	debuglog("***********************************\n");
778 	debuglog("Dumping match filelock\n");
779 	debuglog("***********************************\n");
780 	dump_filelock(fl);
781 	debuglog("***********************************\n");
782 
783 	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
784 		if (retval == NFS_DENIED)
785 			break;
786 
787 		debuglog("Top of lock loop\n");
788 		debuglog("Pointer to file lock: %p\n",ifl);
789 
790 		debuglog("***********************************\n");
791 		debuglog("Dumping test filelock\n");
792 		debuglog("***********************************\n");
793 		dump_filelock(ifl);
794 		debuglog("***********************************\n");
795 
796 		/*
797 		 * XXX: It is conceivable that someone could use the NLM RPC
798 		 * system to directly access filehandles.  This may be a
799 		 * security hazard as the filehandle code may bypass normal
800 		 * file access controls
801 		 */
802 		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
803 			continue;
804 
805 		debuglog("test_nfslock: filehandle match found\n");
806 
807 		/* Filehandles match, check for region overlap */
808 		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
809 			ifl->client.l_offset, ifl->client.l_len))
810 			continue;
811 
812 		debuglog("test_nfslock: Region overlap found"
813 		    " %llu : %llu -- %llu : %llu\n",
814 		    fl->client.l_offset,fl->client.l_len,
815 		    ifl->client.l_offset,ifl->client.l_len);
816 
817 		/* Regions overlap, check the exclusivity */
818 		if (!(fl->client.exclusive || ifl->client.exclusive))
819 			continue;
820 
821 		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
822 		    fl->client.exclusive,
823 		    ifl->client.exclusive);
824 
825 		if (same_filelock_identity(fl,ifl)) {
826 			debuglog("test_nfslock: Duplicate id.  Granting\n");
827 			(*conflicting_fl) = ifl;
828 			retval = NFS_GRANTED_DUPLICATE;
829 		} else {
830 			/* locking attempt fails */
831 			debuglog("test_nfslock: Lock attempt failed\n");
832 			debuglog("Desired lock\n");
833 			dump_filelock(fl);
834 			debuglog("Conflicting lock\n");
835 			dump_filelock(ifl);
836 			(*conflicting_fl) = ifl;
837 			retval = NFS_DENIED;
838 		}
839 	}
840 
841 	debuglog("Dumping file locks\n");
842 	debuglog("Exiting test_nfslock\n");
843 
844 	return (retval);
845 }
846 
847 /*
848  * lock_nfslock: attempt to create a lock in the NFS lock list
849  *
850  * This routine tests whether the lock will be granted and then adds
851  * the entry to the lock list if so.
852  *
853  * Argument fl gets modified as its list housekeeping entries get modified
854  * upon insertion into the NFS lock list
855  *
856  * This routine makes several assumptions:
857  *    1) It is perfectly happy to grant a duplicate lock from the same pid.
858  *       While this seems to be intuitively wrong, it is required for proper
859  *       Posix semantics during unlock.  It is absolutely imperative to not
860  *       unlock the main lock before the two child locks are established. Thus,
861  *       one has be be able to create duplicate locks over an existing lock
862  *    2) It currently accepts duplicate locks from the same id,pid
863  */
864 
865 enum nfslock_status
866 lock_nfslock(struct file_lock *fl)
867 {
868 	enum nfslock_status retval;
869 	struct file_lock *dummy_fl;
870 
871 	dummy_fl = NULL;
872 
873 	debuglog("Entering lock_nfslock...\n");
874 
875 	retval = test_nfslock(fl,&dummy_fl);
876 
877 	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
878 		debuglog("Inserting lock...\n");
879 		dump_filelock(fl);
880 		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
881 	}
882 
883 	debuglog("Exiting lock_nfslock...\n");
884 
885 	return (retval);
886 }
887 
888 /*
889  * delete_nfslock: delete an NFS lock list entry
890  *
891  * This routine is used to delete a lock out of the NFS lock list
892  * without regard to status, underlying locks, regions or anything else
893  *
894  * Note that this routine *does not deallocate memory* of the lock.
895  * It just disconnects it from the list.  The lock can then be used
896  * by other routines without fear of trashing the list.
897  */
898 
899 enum nfslock_status
900 delete_nfslock(struct file_lock *fl)
901 {
902 
903 	LIST_REMOVE(fl, nfslocklist);
904 
905 	return (NFS_GRANTED);
906 }
907 
908 enum split_status
909 split_nfslock(const struct file_lock *exist_lock,
910     const struct file_lock *unlock_lock, struct file_lock **left_lock,
911     struct file_lock **right_lock)
912 {
913 	u_int64_t start1, len1, start2, len2;
914 	enum split_status spstatus;
915 
916 	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
917 	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
918 	    &start1, &len1, &start2, &len2);
919 
920 	if ((spstatus & SPL_LOCK1) != 0) {
921 		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie);
922 		if (*left_lock == NULL) {
923 			debuglog("Unable to allocate resource for split 1\n");
924 			return SPL_RESERR;
925 		}
926 
927 		fill_file_lock(*left_lock, &exist_lock->filehandle,
928 		    exist_lock->addr,
929 		    exist_lock->client.exclusive, exist_lock->client.svid,
930 		    start1, len1,
931 		    exist_lock->client_name, exist_lock->nsm_status,
932 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
933 	}
934 
935 	if ((spstatus & SPL_LOCK2) != 0) {
936 		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie);
937 		if (*right_lock == NULL) {
938 			debuglog("Unable to allocate resource for split 2\n");
939 			if (*left_lock != NULL) {
940 				deallocate_file_lock(*left_lock);
941 			}
942 			return SPL_RESERR;
943 		}
944 
945 		fill_file_lock(*right_lock, &exist_lock->filehandle,
946 		    exist_lock->addr,
947 		    exist_lock->client.exclusive, exist_lock->client.svid,
948 		    start2, len2,
949 		    exist_lock->client_name, exist_lock->nsm_status,
950 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
951 	}
952 
953 	return spstatus;
954 }
955 
956 enum nfslock_status
957 unlock_nfslock(const struct file_lock *fl, struct file_lock **released_lock,
958     struct file_lock **left_lock, struct file_lock **right_lock)
962 {
963 	struct file_lock *mfl; /* Matching file lock */
964 	enum nfslock_status retval;
965 	enum split_status spstatus;
966 
967 	debuglog("Entering unlock_nfslock\n");
968 
969 	*released_lock = NULL;
970 	*left_lock = NULL;
971 	*right_lock = NULL;
972 
973 	retval = NFS_DENIED_NOLOCK;
974 
975 	debuglog("Attempting to match lock...\n");
976 	mfl = get_lock_matching_unlock(fl);
977 
978 	if (mfl != NULL) {
979 		debuglog("Unlock matched.  Querying for split\n");
980 
981 		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);
982 
983 		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
984 		debuglog("********Split dumps********\n");
985 		dump_filelock(mfl);
986 		dump_filelock(fl);
987 		dump_filelock(*left_lock);
988 		dump_filelock(*right_lock);
989 		debuglog("********End Split dumps********\n");
990 
991 		if (spstatus == SPL_RESERR) {
992 			if (*left_lock != NULL) {
993 				deallocate_file_lock(*left_lock);
994 				*left_lock = NULL;
995 			}
996 
997 			if (*right_lock != NULL) {
998 				deallocate_file_lock(*right_lock);
999 				*right_lock = NULL;
1000 			}
1001 
1002 			return NFS_RESERR;
1003 		}
1004 
1005 		/* Insert new locks from split if required */
1006 		if (*left_lock != NULL) {
1007 			debuglog("Split left activated\n");
1008 			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
1009 		}
1010 
1011 		if (*right_lock != NULL) {
1012 			debuglog("Split right activated\n");
1013 			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
1014 		}
1015 
1016 		/* Unlock the lock since it matches identity */
1017 		LIST_REMOVE(mfl, nfslocklist);
1018 		*released_lock = mfl;
1019 		retval = NFS_GRANTED;
1020 	}
1021 
1022 	debuglog("Exiting unlock_nfslock\n");
1023 
1024 	return retval;
1025 }
1026 
1027 /*
1028  * Below here are the routines for manipulating the file lock directly
1029  * on the disk hardware itself
1030  */
1031 enum hwlock_status
1032 lock_hwlock(struct file_lock *fl)
1033 {
1034 	struct monfile *imf,*nmf;
1035 	int lflags, flerror;
1036 
1037 	/* Scan to see if filehandle already present */
1038 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1039 		if (bcmp(&fl->filehandle, &imf->filehandle,
1040 			sizeof(fl->filehandle)) == 0) {
1041 			/* imf is the correct filehandle */
1042 			break;
1043 		}
1044 	}
1045 
1046 	/*
1047 	 * Filehandle already exists (we control the file)
1048 	 * *AND* NFS has already cleared the lock for availability
1049 	 * Grant it and bump the refcount.
1050 	 */
1051 	if (imf != NULL) {
1052 		++(imf->refcount);
1053 		return (HW_GRANTED);
1054 	}
1055 
1056 	/* No filehandle found, create and go */
1057 	nmf = malloc(sizeof(struct monfile));
1058 	if (nmf == NULL) {
1059 		debuglog("hwlock resource allocation failure\n");
1060 		return (HW_RESERR);
1061 	}
1062 
1063 	/* XXX: Is O_RDWR always the correct mode? */
1064 	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
1065 	if (nmf->fd < 0) {
1066 		debuglog("fhopen failed (from %16s): %32s\n",
1067 		    fl->client_name, strerror(errno));
1068 		free(nmf);
1069 		switch (errno) {
1070 		case ESTALE:
1071 			return (HW_STALEFH);
1072 		case EROFS:
1073 			return (HW_READONLY);
1074 		default:
1075 			return (HW_RESERR);
1076 		}
1077 	}
1078 
1079 	/* File opened correctly, fill the monitor struct */
1080 	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
1081 	nmf->refcount = 1;
1082 	nmf->exclusive = fl->client.exclusive;
1083 
1084 	lflags = (nmf->exclusive == 1) ?
1085 	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);
1086 
1087 	flerror = flock(nmf->fd, lflags);
1088 
1089 	if (flerror != 0) {
1090 		debuglog("flock failed (from %16s): %32s\n",
1091 		    fl->client_name, strerror(errno));
1092 		close(nmf->fd);
1093 		free(nmf);
1094 		switch (errno) {
1095 		case EAGAIN:
1096 			return (HW_DENIED);
1097 		case ESTALE:
1098 			return (HW_STALEFH);
1099 		case EROFS:
1100 			return (HW_READONLY);
1101 		default:
1102 			return (HW_RESERR);
1104 		}
1105 	}
1106 
1107 	/* File opened and locked */
1108 	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);
1109 
1110 	debuglog("flock succeeded (from %16s)\n", fl->client_name);
1111 	return (HW_GRANTED);
1112 }
1113 
1114 enum hwlock_status
1115 unlock_hwlock(const struct file_lock *fl)
1116 {
1117 	struct monfile *imf;
1118 
1119 	debuglog("Entering unlock_hwlock\n");
1120 	debuglog("Entering loop iteration\n");
1121 
1122 	/* Scan to see if filehandle already present */
1123 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1124 		if (bcmp(&fl->filehandle, &imf->filehandle,
1125 			sizeof(fl->filehandle)) == 0) {
1126 			/* imf is the correct filehandle */
1127 			break;
1128 		}
1129 	}
1130 
1131 	debuglog("Completed iteration.  Proceeding\n");
1132 
1133 	if (imf == NULL) {
1134 		/* No lock found */
1135 		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1136 		return (HW_DENIED_NOLOCK);
1137 	}
1138 
1139 	/* Lock found */
1140 	--imf->refcount;
1141 
1142 	if (imf->refcount < 0) {
1143 		debuglog("Negative hardware reference count\n");
1144 	}
1145 
1146 	if (imf->refcount <= 0) {
1147 		close(imf->fd);
1148 		LIST_REMOVE(imf, monfilelist);
1149 		free(imf);
1150 	}
1151 	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1152 	return (HW_GRANTED);
1153 }
1154 
1155 enum hwlock_status
1156 test_hwlock(const struct file_lock *fl __unused,
1157     struct file_lock **conflicting_fl __unused)
1159 {
1160 
1161 	/*
1162 	 * XXX: lock tests on hardware are not required until
1163 	 * true partial file testing is done on the underlying file
1164 	 */
1165 	return (HW_RESERR);
1166 }
1167 
1168 
1169 
1170 /*
1171  * Below here are routines for manipulating blocked lock requests
1172  * They should only be called from the XXX_partialfilelock routines
1173  * if at all possible
1174  */
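/*
 * Note that the blocked list reuses the same LIST_ENTRY (nfslocklist)
 * as the active NFS lock list, which is why a file_lock must be removed
 * from one list before it can be threaded onto the other (see
 * retry_blockingfilelocklist below).
 */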
1175 
1176 void
1177 add_blockingfilelock(struct file_lock *fl)
1178 {
1179 
1180 	debuglog("Entering add_blockingfilelock\n");
1181 
1182 	/*
1183 	 * Clear the blocking flag so that it can be reused without
1184 	 * adding it to the blocking queue a second time
1185 	 */
1186 
1187 	fl->blocking = 0;
1188 	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);
1189 
1190 	debuglog("Exiting add_blockingfilelock\n");
1191 }
1192 
1193 void
1194 remove_blockingfilelock(struct file_lock *fl)
1195 {
1196 
1197 	debuglog("Entering remove_blockingfilelock\n");
1198 
1199 	LIST_REMOVE(fl, nfslocklist);
1200 
1201 	debuglog("Exiting remove_blockingfilelock\n");
1202 }
1203 
1204 void
1205 clear_blockingfilelock(const char *hostname)
1206 {
1207 	struct file_lock *ifl,*nfl;
1208 
1209 	/*
1210 	 * LIST_FOREACH would normally be used here, but deleting the
1211 	 * current element would invalidate the iterator, so the next
1212 	 * element must be saved before the current one is removed.
1214 	 */
1215 
1216 	ifl = LIST_FIRST(&blockedlocklist_head);
1217 
1218 	while (ifl != NULL) {
1219 		nfl = LIST_NEXT(ifl, nfslocklist);
1220 
1221 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1222 			remove_blockingfilelock(ifl);
1223 			deallocate_file_lock(ifl);
1224 		}
1225 
1226 		ifl = nfl;
1227 	}
1228 }
1229 
1230 void
1231 retry_blockingfilelocklist(void)
1232 {
1233 	/* Retry all locks in the blocked list */
1234 	struct file_lock *ifl, *nfl, *pfl; /* Iterator */
1235 	enum partialfilelock_status pflstatus;
1236 
1237 	debuglog("Entering retry_blockingfilelocklist\n");
1238 
1239 	pfl = NULL;
1240 	ifl = LIST_FIRST(&blockedlocklist_head);
1241 	debuglog("Iterator choice %p\n",ifl);
1242 
1243 	while (ifl != NULL) {
1244 		/*
1245 		 * SUBTLE BUG: The next element must be worked out before the
1246 		 * current element has been moved
1247 		 */
1248 		nfl = LIST_NEXT(ifl, nfslocklist);
1249 		debuglog("Iterator choice %p\n",ifl);
1250 		debuglog("Prev iterator choice %p\n",pfl);
1251 		debuglog("Next iterator choice %p\n",nfl);
1252 
1253 		/*
1254 		 * SUBTLE BUG: The file_lock must be removed from the
1255 		 * old list so that its list pointers get disconnected
1256 		 * before being allowed to participate in the new list
1257 		 * which will automatically add it in if necessary.
1258 		 */
1259 
1260 		LIST_REMOVE(ifl, nfslocklist);
1261 		pflstatus = lock_partialfilelock(ifl);
1262 
1263 		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1264 			debuglog("Granted blocked lock\n");
1265 			/* lock granted and is now being used */
1266 			send_granted(ifl,0);
1267 		} else {
1268 			/* Reinsert lock back into same place in blocked list */
1269 			debuglog("Replacing blocked lock\n");
1270 			if (pfl != NULL)
1271 				LIST_INSERT_AFTER(pfl, ifl, nfslocklist);
1272 			else
1273 				/* ifl is the only elem. in the list */
1274 				LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
1275 		}
1276 
1277 		/* Valid increment behavior regardless of state of ifl */
1278 		ifl = nfl;
1279 		/* pfl advances only when ifl was reinserted behind it */
1280 		if (pfl != NULL && LIST_NEXT(pfl, nfslocklist) != nfl)
1281 			pfl = LIST_NEXT(pfl, nfslocklist);
1282 		else if (pfl == NULL && LIST_FIRST(&blockedlocklist_head) != nfl)
1283 			pfl = LIST_FIRST(&blockedlocklist_head);
1284 	}
1285 
1286 	debuglog("Exiting retry_blockingfilelocklist\n");
1287 }
1288 
1289 /*
1290  * Below here are routines associated with manipulating all
1291  * aspects of the partial file locking system (list, hardware, etc.)
1292  */
1293 
1294 /*
1295  * Please note that lock monitoring must be done at this level which
1296  * keeps track of *individual* lock requests on lock and unlock
1297  *
1298  * XXX: Split unlocking is going to make the unlock code miserable
1299  */
1300 
1301 /*
1302  * lock_partialfilelock:
1303  *
1304  * Argument fl gets modified as its list housekeeping entries get modified
1305  * upon insertion into the NFS lock list
1306  *
1307  * This routine makes several assumptions:
1308  *    1) It (will) pass locks through to flock to lock the entire underlying file
1309  *     and then parcel out NFS locks if it gets control of the file.
1310  *         This matches the old rpc.lockd file semantics (except where it
1311  *         is now more correct).  It is the safe solution, but will cause
1312  *         overly restrictive blocking if someone is trying to use the
1313  *         underlying files without using NFS.  This appears to be an
1314  *         acceptable tradeoff since most people use standalone NFS servers.
1315  * XXX: The right solution is probably kevent combined with fcntl
1316  *
1317  *    2) Nothing modifies the lock lists between testing and granting
1318  *           I have no idea whether this is a useful assumption or not
1319  */
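/*
 * Result summary (a reading aid derived from the body below):
 * PFL_GRANTED or PFL_GRANTED_DUPLICATE on success, PFL_NFSBLOCKED once
 * a blocking request has been queued via add_blockingfilelock(),
 * PFL_NFSDENIED/PFL_HWDENIED for non-blocking conflicts, and
 * PFL_NFSRESERR/PFL_HWRESERR on resource failure.  In every non-granted
 * case the trial NFS lock has been removed from the NFS lock list again
 * before returning.
 */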
1320 
1321 enum partialfilelock_status
1322 lock_partialfilelock(struct file_lock *fl)
1323 {
1324 	enum partialfilelock_status retval;
1325 	enum nfslock_status lnlstatus;
1326 	enum hwlock_status hwstatus;
1327 
1328 	debuglog("Entering lock_partialfilelock\n");
1329 
1330 	retval = PFL_DENIED;
1331 
1332 	/*
1333 	 * Execute the NFS lock first, if possible, as it is significantly
1334 	 * easier and less expensive to undo than the filesystem lock
1335 	 */
1336 
1337 	lnlstatus = lock_nfslock(fl);
1338 
1339 	switch (lnlstatus) {
1340 	case NFS_GRANTED:
1341 	case NFS_GRANTED_DUPLICATE:
1342 		/*
1343 		 * At this point, the NFS lock is allocated and active.
1344 		 * Remember to clean it up if the hardware lock fails
1345 		 */
1346 		hwstatus = lock_hwlock(fl);
1347 
1348 		switch (hwstatus) {
1349 		case HW_GRANTED:
1350 		case HW_GRANTED_DUPLICATE:
1351 			debuglog("HW GRANTED\n");
1352 			/*
1353 			 * XXX: Fixme: Check hwstatus for duplicate when
1354 			 * true partial file locking and accounting is
1355 			 * done on the hardware
1356 			 */
1357 			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
1358 				retval = PFL_GRANTED_DUPLICATE;
1359 			} else {
1360 				retval = PFL_GRANTED;
1361 			}
1362 			monitor_lock_host(fl->client_name);
1363 			break;
1364 		case HW_RESERR:
1365 			debuglog("HW RESERR\n");
1366 			retval = PFL_HWRESERR;
1367 			break;
1368 		case HW_DENIED:
1369 			debuglog("HW DENIED\n");
1370 			retval = PFL_HWDENIED;
1371 			break;
1372 		default:
1373 			debuglog("Unmatched hwstatus %d\n",hwstatus);
1374 			break;
1375 		}
1376 
1377 		if (retval != PFL_GRANTED &&
1378 		    retval != PFL_GRANTED_DUPLICATE) {
1379 			/* Clean up the NFS lock */
1380 			debuglog("Deleting trial NFS lock\n");
1381 			delete_nfslock(fl);
1382 		}
1383 		break;
1384 	case NFS_DENIED:
1385 		retval = PFL_NFSDENIED;
1386 		break;
1387 	case NFS_RESERR:
1388 		retval = PFL_NFSRESERR;
1389 		break;
1390 	default:
1391 		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
1392 		retval = PFL_NFSDENIED_NOLOCK;
1393 		break;
1393 	}
1394 
1395 	/*
1396 	 * By the time fl reaches here, it is completely free again on
1397 	 * failure.  The NFS lock done before attempting the
1398 	 * hardware lock has been backed out
1399 	 */
1400 
1401 	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
1402 		/* One last chance to check the lock */
1403 		if (fl->blocking == 1) {
1404 			if (retval == PFL_NFSDENIED) {
1405 				/* Queue the lock */
1406 				debuglog("BLOCKING LOCK RECEIVED\n");
1407 				retval = PFL_NFSBLOCKED;
1408 				add_blockingfilelock(fl);
1409 				dump_filelock(fl);
1410 			} else {
1411 				/* retval is okay as PFL_HWDENIED */
1412 				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
1413 				dump_filelock(fl);
1414 			}
1415 		} else {
1416 			/* Leave retval alone, it's already correct */
1417 			debuglog("Lock denied.  Non-blocking failure\n");
1418 			dump_filelock(fl);
1419 		}
1420 	}
1421 
1422 	debuglog("Exiting lock_partialfilelock\n");
1423 
1424 	return retval;
1425 }
1426 
1427 /*
1428  * unlock_partialfilelock:
1429  *
1430  * Given a file_lock, unlock all locks which match.
1431  *
1432  * Note that a given lock might have to unlock ITSELF!  See
1433  * clear_partialfilelock for example.
1434  */
1435 
1436 enum partialfilelock_status
1437 unlock_partialfilelock(const struct file_lock *fl)
1438 {
1439 	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
1440 	enum partialfilelock_status retval;
1441 	enum nfslock_status unlstatus;
1442 	enum hwlock_status unlhwstatus, lhwstatus;
1443 
1444 	debuglog("Entering unlock_partialfilelock\n");
1445 
1446 	selffl = NULL;
1447 	lfl = NULL;
1448 	rfl = NULL;
1449 	releasedfl = NULL;
1450 	retval = PFL_DENIED;
1451 
1452 	/*
1453 	 * There are significant overlap and atomicity issues
1454 	 * with partially releasing a lock.  For example, releasing
1455 	 * part of an NFS shared lock does *not* always release the
1456 	 * corresponding part of the file since there is only one
1457 	 * rpc.lockd UID but multiple users could be requesting it
1458 	 * from NFS.  Also, an unlock request should never allow
1459 	 * another process to gain a lock on the remaining parts.
1460 	 * ie. Always apply the new locks before releasing the
1461 	 * old one
1462 	 */
1463 
1464 	/*
1465 	 * Loop is required since multiple little locks
1466 	 * can be allocated and then deallocated with one
1467 	 * big unlock.
1468 	 *
1469 	 * The loop is required to be here so that the nfs &
1470 	 * hw subsystems do not need to communicate with one
1471 	 * another
1472 	 */
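	/*
	 * Example: three separate NFS locks on bytes 0-9, 20-29 and
	 * 40-49 are all swept away by a single unlock of 0-49; each
	 * pass of the loop releases one matching lock (splitting it
	 * first if only part of it is covered) until unlock_nfslock()
	 * reports NFS_DENIED_NOLOCK.
	 */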
1473 
1474 	do {
1475 		debuglog("Value of releasedfl: %p\n",releasedfl);
1476 		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
1477 		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
1478 		debuglog("Value of releasedfl: %p\n",releasedfl);
1479 
1480 
1481 		/* XXX: This is grungy.  It should be refactored to be cleaner */
1482 		if (lfl != NULL) {
1483 			lhwstatus = lock_hwlock(lfl);
1484 			if (lhwstatus != HW_GRANTED &&
1485 			    lhwstatus != HW_GRANTED_DUPLICATE) {
1486 				debuglog("HW duplicate lock failure for left split\n");
1487 			}
1488 			monitor_lock_host(lfl->client_name);
1489 		}
1490 
1491 		if (rfl != NULL) {
1492 			lhwstatus = lock_hwlock(rfl);
1493 			if (lhwstatus != HW_GRANTED &&
1494 			    lhwstatus != HW_GRANTED_DUPLICATE) {
1495 				debuglog("HW duplicate lock failure for right split\n");
1496 			}
1497 			monitor_lock_host(rfl->client_name);
1498 		}
1499 
1500 		switch (unlstatus) {
1501 		case NFS_GRANTED:
1502 			/* Attempt to unlock on the hardware */
1503 			debuglog("NFS unlock granted.  Attempting hardware unlock\n");
1504 
1505 			/* This call *MUST NOT* unlock the two newly allocated locks */
1506 			unlhwstatus = unlock_hwlock(fl);
1507 			debuglog("HW unlock returned with code %d\n",unlhwstatus);
1508 
1509 			switch (unlhwstatus) {
1510 			case HW_GRANTED:
1511 				debuglog("HW unlock granted\n");
1512 				unmonitor_lock_host(releasedfl->client_name);
1513 				retval = PFL_GRANTED;
1514 				break;
1515 			case HW_DENIED_NOLOCK:
1516 				/* Huh?!?!  This shouldn't happen */
1517 				debuglog("HW unlock denied no lock\n");
1518 				retval = PFL_HWRESERR;
1519 				/* Break out of do-while */
1520 				unlstatus = NFS_RESERR;
1521 				break;
1522 			default:
1523 				debuglog("HW unlock failed\n");
1524 				retval = PFL_HWRESERR;
1525 				/* Break out of do-while */
1526 				unlstatus = NFS_RESERR;
1527 				break;
1528 			}
1529 
1530 			debuglog("Exiting with status retval: %d\n",retval);
1531 
1532 			retry_blockingfilelocklist();
1533 			break;
1534 		case NFS_DENIED_NOLOCK:
1535 			retval = PFL_GRANTED;
1536 			debuglog("All locks cleaned out\n");
1537 			break;
1538 		default:
1539 			retval = PFL_NFSRESERR;
1540 			debuglog("NFS unlock failure\n");
1541 			dump_filelock(fl);
1542 			break;
1543 		}
1544 
1545 		if (releasedfl != NULL) {
1546 			if (fl == releasedfl) {
1547 				/*
1548 				 * XXX: YECHHH!!! Attempt to unlock self succeeded
1549 				 * but we can't deallocate the space yet.  This is what
1550 				 * happens when you don't write malloc and free together
1551 				 */
1552 				debuglog("Attempt to unlock self\n");
1553 				selffl = releasedfl;
1554 			} else {
1555 				/*
1556 				 * XXX: this deallocation *still* needs to migrate closer
1557 				 * to the allocation code way up in get_lock or the allocation
1558 				 * code needs to migrate down (violation of "When you write
1559 				 * malloc you must write free")
1560 				 */
1561 
1562 				deallocate_file_lock(releasedfl);
1563 			}
1564 		}
1565 
1566 	} while (unlstatus == NFS_GRANTED);
1567 
1568 	if (selffl != NULL) {
1569 		/*
1570 		 * This statement wipes out the incoming file lock (fl)
1571 		 * in spite of the fact that it is declared const
1572 		 */
1573 		debuglog("WARNING!  Destroying incoming lock pointer\n");
1574 		deallocate_file_lock(selffl);
1575 	}
1576 
1577 	debuglog("Exiting unlock_partialfilelock\n");
1578 
1579 	return retval;
1580 }
1581 
1582 /*
1583  * clear_partialfilelock
1584  *
1585  * Normally called in response to statd state number change.
1586  * Wipe out all locks held by a host.  As a bonus, the act of
1587  * doing so should automatically clear their statd entries and
1588  * unmonitor the host.
1589  */
1590 
1591 void
1592 clear_partialfilelock(const char *hostname)
1593 {
1594 	struct file_lock *ifl, *nfl;
1595 
1596 	/* Clear blocking file lock list */
1597 	clear_blockingfilelock(hostname);
1598 
1599 	/* do all required unlocks */
1600 	/* Note that unlock can smash the current pointer to a lock */
1601 
1602 	/*
1603 	 * LIST_FOREACH would normally be used here, but deleting the
1604 	 * current element would invalidate the iterator, so the next
1605 	 * element must be saved before the current one is removed.
1607 	 */
1608 
1609 	ifl = LIST_FIRST(&nfslocklist_head);
1610 
1611 	while (ifl != NULL) {
1612 		nfl = LIST_NEXT(ifl, nfslocklist);
1613 
1614 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1615 			/* Unlock destroys ifl out from underneath */
1616 			unlock_partialfilelock(ifl);
1617 			/* ifl is NO LONGER VALID AT THIS POINT */
1618 		}
1619 		ifl = nfl;
1620 	}
1621 }
1622 
1623 /*
1624  * test_partialfilelock:
1625  */
1626 enum partialfilelock_status
1627 test_partialfilelock(const struct file_lock *fl,
1628     struct file_lock **conflicting_fl)
1629 {
1630 	enum partialfilelock_status retval;
1631 	enum nfslock_status teststatus;
1632 
1633 	debuglog("Entering testpartialfilelock...\n");
1634 
1635 	retval = PFL_DENIED;
1636 
1637 	teststatus = test_nfslock(fl, conflicting_fl);
1638 	debuglog("test_partialfilelock: teststatus %d\n",teststatus);
1639 
1640 	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
1641 		/* XXX: Add the underlying filesystem locking code */
1642 		retval = (teststatus == NFS_GRANTED) ?
1643 		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
1644 		debuglog("Dumping locks...\n");
1645 		dump_filelock(fl);
1646 		dump_filelock(*conflicting_fl);
1647 		debuglog("Done dumping locks...\n");
1648 	} else {
1649 		retval = PFL_NFSDENIED;
1650 		debuglog("NFS test denied.\n");
1651 		dump_filelock(fl);
1652 		debuglog("Conflicting.\n");
1653 		dump_filelock(*conflicting_fl);
1654 	}
1655 
1656 	debuglog("Exiting testpartialfilelock...\n");
1657 
1658 	return retval;
1659 }
1660 
1661 /*
1662  * Below here are routines associated with translating the partial file locking
1663  * codes into useful codes to send back to the NFS RPC messaging system
1664  */
1665 
1666 /*
1667  * These routines translate the (relatively) useful return codes back onto
1668  * the few return codes which the nlm subsystem wishes to transmit
1669  */
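/*
 * Rough mapping (see the switches below, which pick the nlm4_* or
 * nlm_* variant based on the LOCK_V4 flag): PFL_GRANTED* -> granted,
 * PFL_*DENIED -> denied, PFL_*BLOCKED -> blocked, PFL_*RESERR ->
 * denied_nolocks, anything unrecognized -> nlm4_failed (nlm_denied for
 * v1/v3, which has no "failed" code).  The one twist is do_unlock(),
 * which reports "no lock found" as granted.
 */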
1670 
1671 enum nlm_stats
1672 do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1673 {
1674 	enum partialfilelock_status pfsret;
1675 	enum nlm_stats retval;
1676 
1677 	debuglog("Entering do_test...\n");
1678 
1679 	pfsret = test_partialfilelock(fl,conflicting_fl);
1680 
1681 	switch (pfsret) {
1682 	case PFL_GRANTED:
1683 		debuglog("PFL test lock granted\n");
1684 		dump_filelock(fl);
1685 		dump_filelock(*conflicting_fl);
1686 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1687 		break;
1688 	case PFL_GRANTED_DUPLICATE:
1689 		debuglog("PFL test lock granted--duplicate id detected\n");
1690 		dump_filelock(fl);
1691 		dump_filelock(*conflicting_fl);
1692 		debuglog("Clearing conflicting_fl for call semantics\n");
1693 		*conflicting_fl = NULL;
1694 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1695 		break;
1696 	case PFL_NFSDENIED:
1697 	case PFL_HWDENIED:
1698 		debuglog("PFL test lock denied\n");
1699 		dump_filelock(fl);
1700 		dump_filelock(*conflicting_fl);
1701 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1702 		break;
1703 	case PFL_NFSRESERR:
1704 	case PFL_HWRESERR:
1705 		debuglog("PFL test lock resource fail\n");
1706 		dump_filelock(fl);
1707 		dump_filelock(*conflicting_fl);
1708 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1709 		break;
1710 	default:
1711 		debuglog("PFL test lock *FAILED*\n");
1712 		dump_filelock(fl);
1713 		dump_filelock(*conflicting_fl);
1714 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1715 		break;
1716 	}
1717 
1718 	debuglog("Exiting do_test...\n");
1719 
1720 	return retval;
1721 }
1722 
1723 /*
1724  * do_lock: Try to acquire a lock
1725  *
1726  * This routine makes a distinction between NLM versions.  I am pretty
1727  * convinced that this should be abstracted out and bounced up a level
1728  */
1729 
1730 enum nlm_stats
1731 do_lock(struct file_lock *fl)
1732 {
1733 	enum partialfilelock_status pfsret;
1734 	enum nlm_stats retval;
1735 
1736 	debuglog("Entering do_lock...\n");
1737 
1738 	pfsret = lock_partialfilelock(fl);
1739 
1740 	switch (pfsret) {
1741 	case PFL_GRANTED:
1742 		debuglog("PFL lock granted");
1743 		dump_filelock(fl);
1744 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1745 		break;
1746 	case PFL_GRANTED_DUPLICATE:
1747 		debuglog("PFL lock granted--duplicate id detected");
1748 		dump_filelock(fl);
1749 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1750 		break;
1751 	case PFL_NFSDENIED:
1752 	case PFL_HWDENIED:
1753 		debuglog("PFL_NFS lock denied");
1754 		dump_filelock(fl);
1755 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1756 		break;
1757 	case PFL_NFSBLOCKED:
1758 	case PFL_HWBLOCKED:
1759 		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
1760 		dump_filelock(fl);
1761 		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1762 		break;
1763 	case PFL_NFSRESERR:
1764 	case PFL_HWRESERR:
1765 		debuglog("PFL lock resource allocation fail\n");
1766 		dump_filelock(fl);
1767 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1768 		break;
1769 	default:
1770 		debuglog("PFL lock *FAILED*");
1771 		dump_filelock(fl);
1772 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1773 		break;
1774 	}
1775 
1776 	debuglog("Exiting do_lock...\n");
1777 
1778 	return retval;
1779 }
1780 
1781 enum nlm_stats
1782 do_unlock(struct file_lock *fl)
1783 {
1784 	enum partialfilelock_status pfsret;
1785 	enum nlm_stats retval;
1786 
1787 	debuglog("Entering do_unlock...\n");
1788 	pfsret = unlock_partialfilelock(fl);
1789 
1790 	switch (pfsret) {
1791 	case PFL_GRANTED:
1792 		debuglog("PFL unlock granted\n");
1793 		dump_filelock(fl);
1794 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1795 		break;
1796 	case PFL_NFSDENIED:
1797 	case PFL_HWDENIED:
1798 		debuglog("PFL_NFS unlock denied\n");
1799 		dump_filelock(fl);
1800 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1801 		break;
1802 	case PFL_NFSDENIED_NOLOCK:
1803 	case PFL_HWDENIED_NOLOCK:
1804 		debuglog("PFL_NFS no lock found\n");
1805 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1806 		break;
1807 	case PFL_NFSRESERR:
1808 	case PFL_HWRESERR:
1809 		debuglog("PFL unlock resource failure\n");
1810 		dump_filelock(fl);
1811 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1812 		break;
1813 	default:
1814 		debuglog("PFL unlock *FAILED*\n");
1815 		dump_filelock(fl);
1816 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1817 		break;
1818 	}
1819 
1820 	debuglog("Exiting do_unlock...\n");
1821 
1822 	return retval;
1823 }
1824 
1825 /*
1826  * do_clear
1827  *
1828  * This routine is a trivial pass-through: there is no return code to
1829  * translate.  It is here for completeness in case someone *does* need
1830  * return codes later.  A decent compiler should optimize it away.
1831  */
1832 
1833 void
1834 do_clear(const char *hostname)
1835 {
1836 
1837 	clear_partialfilelock(hostname);
1838 }
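
/*
 * If return codes are added later as anticipated above, a sketch
 * (hypothetical) might simply report unconditional success:
 *
 *	enum nlm_stats
 *	do_clear(const char *hostname)
 *	{
 *		clear_partialfilelock(hostname);
 *		return nlm_granted;
 *	}
 */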
1839 
1840 /*
1841  * The following routines are all called from the code which the
1842  * RPC layer invokes
1843  */
1844 
1845 /*
1846  * testlock(): inform the caller if the requested lock would be granted
1847  *
1848  * returns NULL if the lock would be granted
1849  * returns pointer to a conflicting nlm4_holder if not
1850  */
1851 
1852 struct nlm4_holder *
1853 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
1854 {
1855 	struct file_lock test_fl, *conflicting_fl;
1856 
1857 	bzero(&test_fl, sizeof(test_fl));
1858 
1859 	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
1860 	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
1861 
1862 	siglock();
1863 	do_test(&test_fl, &conflicting_fl);
1864 
1865 	if (conflicting_fl == NULL) {
1866 		debuglog("No conflicting lock found\n");
1867 		sigunlock();
1868 		return NULL;
1869 	} else {
1870 		debuglog("Found conflicting lock\n");
1871 		dump_filelock(conflicting_fl);
1872 		sigunlock();
1873 		return (&conflicting_fl->client);
1874 	}
1875 }
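
/*
 * Hypothetical caller sketch: an NLM test service procedure only needs
 * the returned holder to fill in a "denied" reply, roughly:
 *
 *	holder = testlock(&arg->alock, arg->exclusive, 0);
 *	res->stat.stat = (holder == NULL) ? nlm4_granted : nlm4_denied;
 *	if (holder != NULL)
 *		res->stat.nlm4_testrply_u.holder = *holder;
 */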
1876 
1877 /*
1878  * getlock: try to acquire the lock.
1879  * If file is already locked and we can sleep, put the lock in the list with
1880  * status LKST_WAITING; it'll be processed later.
1881  * Otherwise try to lock. If we're allowed to block, fork a child which
1882  * will do the blocking lock.
1883  */
1884 
1885 enum nlm_stats
1886 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1887 {
1888 	struct file_lock *newfl;
1889 	enum nlm_stats retval;
1890 
1891 	debuglog("Entering getlock...\n");
1892 
1893 	if (grace_expired == 0 && lckarg->reclaim == 0)
1894 		return (flags & LOCK_V4) ?
1895 		    nlm4_denied_grace_period : nlm_denied_grace_period;
1896 
1897 	/* allocate new file_lock for this request */
1898 	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie);
1899 	if (newfl == NULL) {
1900 		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
1901 		/* failed */
1902 		return (flags & LOCK_V4) ?
1903 		    nlm4_denied_nolocks : nlm_denied_nolocks;
1904 	}
1905 
1906 	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
1907 		debuglog("received fhandle size %d, local size %d\n",
1908 		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
1909 	}
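	/*
	 * Note (assumption): fill_file_lock below stores a full local
	 * fhandle_t taken from fh.n_bytes, so an undersized n_len
	 * reported here would leave part of the handle uninitialized.
	 */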
1910 
1911 	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
1912 	    (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf,
1913 	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
1914 	    lckarg->alock.l_len,
1915 	    lckarg->alock.caller_name, lckarg->state, 0, flags, lckarg->block);
1916 
1917 	/*
1918 	 * newfl is now fully constructed and deallocate_file_lock
1919 	 * can now be used to delete it
1920 	 */
1921 
1922 	siglock();
1923 	debuglog("Pointer to new lock is %p\n", newfl);
1924 
1925 	retval = do_lock(newfl);
1926 
1927 	debuglog("Pointer to new lock is %p\n", newfl);
1928 	sigunlock();
1929 
1930 	switch (retval) {
1931 	case nlm4_granted:
1932 		/* case nlm_granted: is the same as nlm4_granted */
1933 		/* do_mon(lckarg->alock.caller_name); */
1934 		break;
1935 	case nlm4_blocked:
1936 		/* case nlm_blocked: is the same as nlm4_blocked */
1937 		/* do_mon(lckarg->alock.caller_name); */
1938 		break;
1939 	default:
1940 		deallocate_file_lock(newfl);
1941 		break;
1942 	}
1944 
1945 	debuglog("Exiting getlock...\n");
1946 
1947 	return retval;
1948 }
1949 
1950 
1951 /* unlock a filehandle */
1952 enum nlm_stats
1953 unlock(nlm4_lock *lock, const int flags __unused)
1954 {
1955 	struct file_lock fl;
1956 	enum nlm_stats err;
1957 
1958 	siglock();
1959 
1960 	debuglog("Entering unlock...\n");
1961 
1962 	bzero(&fl,sizeof(struct file_lock));
1963 	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));
1964 
1965 	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
1966 
1967 	err = do_unlock(&fl);
1968 
1969 	sigunlock();
1970 
1971 	debuglog("Exiting unlock...\n");
1972 
1973 	return err;
1974 }
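
/*
 * Note the asymmetry with getlock(): unlock() can build its file_lock
 * on the stack because nothing keeps a reference once do_unlock()
 * returns, while getlock() must heap-allocate via allocate_file_lock().
 */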
1975 
1976 /*
1977  * XXX: The following monitor/unmonitor routines
1978  * have not been extensively tested (i.e., no regression
1979  * script exists for them as for the locking sections).
1980  */
1981 
1982 /*
1983  * monitor_lock_host: monitor lock hosts locally with a ref count and
1984  * inform statd
1985  */
1986 void
1987 monitor_lock_host(const char *hostname)
1988 {
1989 	struct host *ihp, *nhp;
1990 	struct mon smon;
1991 	struct sm_stat_res sres;
1992 	int rpcret, statflag;
1993 
1994 	rpcret = 0;
1995 	statflag = 0;
1996 
1997 	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
1998 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
1999 			/* Host is already monitored, bump refcount */
2000 			++ihp->refcnt;
2001 			/* Host should only be in the monitor list once */
2002 			return;
2003 		}
2004 	}
2005 
2006 	/* Host is not yet monitored, add it */
2007 	nhp = malloc(sizeof(struct host));
2008 
2009 	if (nhp == NULL) {
2010 		debuglog("Unable to allocate entry for statd mon\n");
2011 		return;
2012 	}
2013 
2014 	/* Allocated new host entry, now fill the fields */
2015 	strlcpy(nhp->name, hostname, SM_MAXSTRLEN); /* NUL-terminates, unlike strncpy */
2016 	nhp->refcnt = 1;
2017 	debuglog("Locally monitoring host %16s\n", hostname);
2018 
2019 	debuglog("Attempting to tell statd\n");
2020 
2021 	bzero(&smon,sizeof(smon));
2022 
2023 	smon.mon_id.mon_name = nhp->name;
2024 	smon.mon_id.my_id.my_name = "localhost";
2025 
2026 	smon.mon_id.my_id.my_prog = NLM_PROG;
2027 	smon.mon_id.my_id.my_vers = NLM_SM;
2028 	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2029 
2030 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
2031 	    (xdrproc_t)xdr_mon, &smon,
2032 	    (xdrproc_t)xdr_sm_stat_res, &sres);
2033 
2034 	if (rpcret == 0) {
2035 		if (sres.res_stat == stat_fail) {
2036 			debuglog("Statd call failed\n");
2037 			statflag = 0;
2038 		} else {
2039 			statflag = 1;
2040 		}
2041 	} else {
2042 		debuglog("Rpc call to statd failed with return value: %d\n",
2043 		    rpcret);
2044 		statflag = 0;
2045 	}
2046 
2047 	if (statflag == 1) {
2048 		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2049 	} else {
2050 		free(nhp);
2051 	}
2052 
2053 }
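
/*
 * Usage sketch (hypothetical call sites): the refcounting above only
 * works if the calls are paired per granted lock, e.g.:
 *
 *	monitor_lock_host(fl->client_name);	when a lock is granted
 *	unmonitor_lock_host(fl->client_name);	when that lock goes away
 */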
2054 
2055 /*
2056  * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2057  */
2058 void
2059 unmonitor_lock_host(char *hostname)
2060 {
2061 	struct host *ihp;
2062 	struct mon_id smon_id;
2063 	struct sm_stat smstat;
2064 	int rpcret;
2065 
2066 	rpcret = 0;
2067 
2068 	for (ihp = LIST_FIRST(&hostlst_head); ihp != NULL;
2069 	     ihp = LIST_NEXT(ihp, hostlst)) {
2070 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2071 			/* Host is monitored, decrement refcount */
2072 			--ihp->refcnt;
2073 			/* Host should only be in the monitor list once */
2074 			break;
2075 		}
2076 	}
2077 
2078 	if (ihp == NULL) {
2079 		debuglog("Could not find host %16s in mon list\n", hostname);
2080 		return;
2081 	}
2082 
2083 	if (ihp->refcnt > 0)
2084 		return;
2085 
2086 	if (ihp->refcnt < 0) {
2087 		debuglog("Negative refcount!: %d\n",
2088 		    ihp->refcnt);
2089 	}
2090 
2091 	debuglog("Attempting to unmonitor host %16s\n", hostname);
2092 
2093 	bzero(&smon_id,sizeof(smon_id));
2094 
2095 	smon_id.mon_name = hostname;
2096 	smon_id.my_id.my_name = "localhost";
2097 	smon_id.my_id.my_prog = NLM_PROG;
2098 	smon_id.my_id.my_vers = NLM_SM;
2099 	smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2100 
2101 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
2102 	    (xdrproc_t)xdr_mon_id, &smon_id,
2103 	    (xdrproc_t)xdr_sm_stat, &smstat);
2104 
2105 	if (rpcret != 0) {
2106 		debuglog("Rpc call to unmonitor statd failed with "
2107 		    "return value: %d\n", rpcret);
2108 	}
2109 
2110 	LIST_REMOVE(ihp, hostlst);
2111 	free(ihp);
2112 }
2113 
2114 /*
2115  * notify: Clear all locks from a host if statd complains
2116  *
2117  * XXX: This routine has not been thoroughly tested.  However, neither
2118  * had the old one been.  It used to compare the statd crash state counter
2119  * to the current lock state.  The upshot of this was that it basically
2120  * cleared all locks from the specified host 99% of the time (with the
2121  * other 1% being a bug).  Consequently, the assumption is that clearing
2122  * all locks from a host when notified by statd is acceptable.
2123  *
2124  * Please note that this routine skips the usual level of redirection
2125  * through a do_* type routine.  This introduces a possible level of
2126  * error and might better be written as do_notify and take this one out.
2127  *
2128  */
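
/*
 * The do_notify refactoring suggested above would be a thin wrapper; a
 * sketch (hypothetical, compiled out):
 */
#if 0
static void
do_notify(const char *hostname)
{
	clear_partialfilelock(hostname);
}
#endif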
2129 
2130 void
2131 notify(const char *hostname, const int state)
2132 {
2133 	debuglog("notify from %s, new state %d\n", hostname, state);
2134 
2135 	siglock();
2136 	do_clear(hostname);
2137 	sigunlock();
2138 
2139 	debuglog("Leaving notify\n");
2140 }
2141 
2142 void
2143 send_granted(struct file_lock *fl, int opcode __unused)
2144 {
2147 	CLIENT *cli;
2148 	static char dummy;
2149 	struct timeval timeo;
2150 	int success;
2151 	static struct nlm_res retval;
2152 	static struct nlm4_res retval4;
2153 
2154 	debuglog("About to send granted on blocked lock\n");
2155 	sleep(1);
2156 	debuglog("Blowing off return send\n");
2157 
2158 	cli = get_client(fl->addr,
2159 	    (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2160 	if (cli == NULL) {
2161 		syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2162 		    fl->client_name);
2163 		/*
2164 		 * We fail to notify remote that the lock has been granted.
2165 		 * The client will timeout and retry, the lock will be
2166 		 * granted at this time.
2167 		 */
2168 		return;
2169 	}
2170 	timeo.tv_sec = 0;
2171 	timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
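	/*
	 * Assumption: the async (_MSG) path below is one-way and never
	 * waits for a reply, so its zero timeout is harmless; the RPC
	 * reply path waits up to the 0.5s set above.
	 */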
2172 
2173 	if (fl->flags & LOCK_V4) {
2174 		static nlm4_testargs res;
2175 		res.cookie = fl->client_cookie;
2176 		res.exclusive = fl->client.exclusive;
2177 		res.alock.caller_name = fl->client_name;
2178 		res.alock.fh.n_len = sizeof(fhandle_t);
2179 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2180 		res.alock.oh = fl->client.oh;
2181 		res.alock.svid = fl->client.svid;
2182 		res.alock.l_offset = fl->client.l_offset;
2183 		res.alock.l_len = fl->client.l_len;
2184 		debuglog("sending v4 reply%s\n",
2185 			 (fl->flags & LOCK_ASYNC) ? " (async)" : "");
2186 		if (fl->flags & LOCK_ASYNC) {
2187 			success = clnt_call(cli, NLM4_GRANTED_MSG,
2188 			    (xdrproc_t)xdr_nlm4_testargs, &res,
2189 			    (xdrproc_t)xdr_void, &dummy, timeo);
2190 		} else {
2191 			success = clnt_call(cli, NLM4_GRANTED,
2192 			    (xdrproc_t)xdr_nlm4_testargs, &res,
2193 			    (xdrproc_t)xdr_nlm4_res, &retval4, timeo);
2194 		}
2195 	} else {
2196 		static nlm_testargs res;
2197 
2198 		res.cookie = fl->client_cookie;
2199 		res.exclusive = fl->client.exclusive;
2200 		res.alock.caller_name = fl->client_name;
2201 		res.alock.fh.n_len = sizeof(fhandle_t);
2202 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2203 		res.alock.oh = fl->client.oh;
2204 		res.alock.svid = fl->client.svid;
2205 		res.alock.l_offset = fl->client.l_offset;
2206 		res.alock.l_len = fl->client.l_len;
2207 		debuglog("sending v1 reply%s\n",
2208 			 (fl->flags & LOCK_ASYNC) ? " (async)" : "");
2209 		if (fl->flags & LOCK_ASYNC) {
2210 			success = clnt_call(cli, NLM_GRANTED_MSG,
2211 			    (xdrproc_t)xdr_nlm_testargs, &res,
2212 			    (xdrproc_t)xdr_void, &dummy, timeo);
2213 		} else {
2214 			success = clnt_call(cli, NLM_GRANTED,
2215 			    (xdrproc_t)xdr_nlm_testargs, &res,
2216 			    (xdrproc_t)xdr_nlm_res, &retval, timeo);
2217 		}
2218 	}
2219 	if (debug_level > 2)
2220 		debuglog("clnt_call returns %d(%s) for granted\n",
2221 			 success, clnt_sperrno(success));
2222 
2223 }
2224 
2225 /*
2226  * Routines below here have not been modified in the overhaul
2227  */
2228 
2229 /*
2230  * Are these two routines still required since lockd is not spawning off
2231  * children to service locks anymore?  Presumably they were originally
2232  * put in place to prevent a one child from changing the lock list out
2233  * from under another one.
2234  */
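
/*
 * Either way, the usage pattern above is a simple critical section,
 * e.g. in getlock()/unlock()/notify():
 *
 *	siglock();
 *	... examine or mutate the lock lists ...
 *	sigunlock();
 *
 * i.e. SIGCHLD is blocked while the lists may be inconsistent.
 */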
2235 
2236 void
2237 siglock(void)
2238 {
2239 	sigset_t block;
2240 
2241 	sigemptyset(&block);
2242 	sigaddset(&block, SIGCHLD);
2243 
2244 	if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2245 		syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2246 	}
2247 }
2248 
2249 void
2250 sigunlock(void)
2251 {
2252 	sigset_t block;
2253 
2254 	sigemptyset(&block);
2255 	sigaddset(&block, SIGCHLD);
2256 
2257 	if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2258 		syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2259 	}
2260 }
2261 
2262 
2263