xref: /freebsd/usr.sbin/rpc.lockd/lockd_lock.c (revision 4e8d558c)
1 /*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
7  * Copyright (c) 2000 Manuel Bouyer.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #define LOCKD_DEBUG
43 
44 #include <stdio.h>
45 #ifdef LOCKD_DEBUG
46 #include <stdarg.h>
47 #endif
48 #include <stdlib.h>
49 #include <unistd.h>
50 #include <fcntl.h>
51 #include <syslog.h>
52 #include <errno.h>
53 #include <string.h>
54 #include <signal.h>
55 #include <rpc/rpc.h>
56 #include <sys/types.h>
57 #include <sys/stat.h>
58 #include <sys/socket.h>
59 #include <sys/param.h>
60 #include <sys/mount.h>
61 #include <sys/wait.h>
62 #include <rpcsvc/sm_inter.h>
63 #include <rpcsvc/nlm_prot.h>
64 #include "lockd_lock.h"
65 #include "lockd.h"
66 
67 #define MAXOBJECTSIZE 64
68 #define MAXBUFFERSIZE 1024
69 
70 /*
71  * A set of utilities for managing file locking
72  *
73  * XXX: All locks are in a linked list; a better structure should be used
74  * to improve search/access efficiency.
75  */
76 
77 /* struct describing a lock */
78 struct file_lock {
79 	LIST_ENTRY(file_lock) nfslocklist;
80 	fhandle_t filehandle; /* NFS filehandle */
81 	struct sockaddr *addr;
82 	struct nlm4_holder client; /* lock holder */
83 	/* XXX: client_cookie used *only* in send_granted */
84 	netobj client_cookie; /* cookie sent by the client */
85 	int nsm_status; /* status from the remote lock manager */
86 	int status; /* lock status, see below */
87 	int flags; /* lock flags, see lockd_lock.h */
88 	int blocking; /* blocking lock or not */
89 	char client_name[SM_MAXSTRLEN];	/* client_name is really variable
90 					   length and must be last! */
91 };
92 
93 LIST_HEAD(nfslocklist_head, file_lock);
94 struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);
95 
96 LIST_HEAD(blockedlocklist_head, file_lock);
97 struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);
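/*
 * Both lists above link their entries through the same nfslocklist field
 * of struct file_lock, so a given lock can sit on only one of them at a
 * time: it is moved between the active and blocked lists, not copied.
 */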
98 
99 /* lock status */
100 #define LKST_LOCKED	1 /* lock is locked */
101 /* XXX: Is this flag file specific or lock specific? */
102 #define LKST_WAITING	2 /* file is already locked by another host */
103 #define LKST_PROCESSING	3 /* child is trying to acquire the lock */
104 #define LKST_DYING	4 /* must die when we get news from the child */
105 
106 /* struct describing a monitored host */
107 struct host {
108 	LIST_ENTRY(host) hostlst;
109 	int refcnt;
110 	char name[SM_MAXSTRLEN]; /* name is really variable length and
111                                     must be last! */
112 };
113 /* list of hosts we monitor */
114 LIST_HEAD(hostlst_head, host);
115 struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);
116 
117 /*
118  * File monitoring handlers
119  * XXX: These might be able to be removed when kevent support
120  * is placed into the hardware lock/unlock routines.  (i.e.,
121  * let the kernel do all the file monitoring)
122  */
123 
124 /* Struct describing a monitored file */
125 struct monfile {
126 	LIST_ENTRY(monfile) monfilelist;
127 	fhandle_t filehandle; /* Local access filehandle */
128 	int fd; /* file descriptor: remains open until unlock! */
129 	int refcount;
130 	int exclusive;
131 };
132 
133 /* List of files we monitor */
134 LIST_HEAD(monfilelist_head, monfile);
135 struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);
136 
137 static int debugdelay = 0;
138 
139 enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
140 		      NFS_DENIED, NFS_DENIED_NOLOCK,
141 		      NFS_RESERR };
142 
143 enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
144 		     HW_DENIED, HW_DENIED_NOLOCK,
145 		     HW_STALEFH, HW_READONLY, HW_RESERR };
146 
147 enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
148 			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
149 			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR};
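/*
 * The three status enums above mirror the layering of the code below: the
 * *_nfslock routines manage the list of NFS byte-range locks, the *_hwlock
 * routines manage the flock()-based locks on the underlying local files,
 * and the *_partialfilelock routines drive both layers and report a
 * combined result.
 */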
150 
151 enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
152 enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
153 /* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
154 enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};
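/*
 * split_status doubles as a bit mask: region_compare() may OR together
 * SPL_LOCK1 and SPL_LOCK2 when an unlock leaves remainders on both sides
 * of the established lock, while SPL_DISJOINT, SPL_CONTAINED and SPL_RESERR
 * are reported on their own.
 */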
155 
156 enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);
157 
158 void send_granted(struct file_lock *fl, int opcode);
159 void siglock(void);
160 void sigunlock(void);
161 void monitor_lock_host(const char *hostname);
162 void unmonitor_lock_host(char *hostname);
163 
164 void	copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
165     const bool_t exclusive, struct nlm4_holder *dest);
166 struct file_lock *	allocate_file_lock(const netobj *lockowner,
167 					   const netobj *matchcookie,
168 					   const struct sockaddr *addr,
169 					   const char *caller_name);
170 void	deallocate_file_lock(struct file_lock *fl);
171 void	fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
172 		       const bool_t exclusive, const int32_t svid,
173     const u_int64_t offset, const u_int64_t len,
174     const int state, const int status, const int flags, const int blocking);
175 int	regions_overlap(const u_int64_t start1, const u_int64_t len1,
176     const u_int64_t start2, const u_int64_t len2);
177 enum split_status  region_compare(const u_int64_t starte, const u_int64_t lene,
178     const u_int64_t startu, const u_int64_t lenu,
179     u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
180 int	same_netobj(const netobj *n0, const netobj *n1);
181 int	same_filelock_identity(const struct file_lock *fl0,
182     const struct file_lock *fl2);
183 
184 static void debuglog(char const *fmt, ...);
185 void dump_static_object(const unsigned char* object, const int sizeof_object,
186                         unsigned char* hbuff, const int sizeof_hbuff,
187                         unsigned char* cbuff, const int sizeof_cbuff);
188 void dump_netobj(const struct netobj *nobj);
189 void dump_filelock(const struct file_lock *fl);
190 struct file_lock *	get_lock_matching_unlock(const struct file_lock *fl);
191 enum nfslock_status	test_nfslock(const struct file_lock *fl,
192     struct file_lock **conflicting_fl);
193 enum nfslock_status	lock_nfslock(struct file_lock *fl);
194 enum nfslock_status	delete_nfslock(struct file_lock *fl);
195 enum nfslock_status	unlock_nfslock(const struct file_lock *fl,
196     struct file_lock **released_lock, struct file_lock **left_lock,
197     struct file_lock **right_lock);
198 enum hwlock_status lock_hwlock(struct file_lock *fl);
199 enum split_status split_nfslock(const struct file_lock *exist_lock,
200     const struct file_lock *unlock_lock, struct file_lock **left_lock,
201     struct file_lock **right_lock);
202 int	duplicate_block(struct file_lock *fl);
203 void	add_blockingfilelock(struct file_lock *fl);
204 enum hwlock_status	unlock_hwlock(const struct file_lock *fl);
205 enum hwlock_status	test_hwlock(const struct file_lock *fl,
206     struct file_lock **conflicting_fl);
207 void	remove_blockingfilelock(struct file_lock *fl);
208 void	clear_blockingfilelock(const char *hostname);
209 void	retry_blockingfilelocklist(void);
210 enum partialfilelock_status	unlock_partialfilelock(
211     const struct file_lock *fl);
212 void	clear_partialfilelock(const char *hostname);
213 enum partialfilelock_status	test_partialfilelock(
214     const struct file_lock *fl, struct file_lock **conflicting_fl);
215 enum nlm_stats	do_test(struct file_lock *fl,
216     struct file_lock **conflicting_fl);
217 enum nlm_stats	do_unlock(struct file_lock *fl);
218 enum nlm_stats	do_lock(struct file_lock *fl);
219 void	do_clear(const char *hostname);
220 
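/*
 * debuglog: format a message and hand it to syslog at LOG_DEBUG priority.
 * Nothing is logged unless debug_level is at least 1; debugdelay (zero by
 * default) adds an optional pause, in seconds, before each message.
 */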
221 void
222 debuglog(char const *fmt, ...)
223 {
224 	va_list ap;
225 
226 	if (debug_level < 1) {
227 		return;
228 	}
229 
230 	sleep(debugdelay);
231 
232 	va_start(ap, fmt);
233 	vsyslog(LOG_DEBUG, fmt, ap);
234 	va_end(ap);
235 }
236 
237 void
238 dump_static_object(const unsigned char *object, const int size_object,
239     unsigned char *hbuff, const int size_hbuff, unsigned char *cbuff,
240     const int size_cbuff)
241 {
242 	int i, objectsize;
243 
244 	if (debug_level < 2) {
245 		return;
246 	}
247 
248 	objectsize = size_object;
249 
250 	if (objectsize == 0) {
251 		debuglog("object is size 0\n");
252 	} else {
253 		if (objectsize > MAXOBJECTSIZE) {
254 			debuglog("Object of size %d being clamped"
255 			    " to size %d\n", objectsize, MAXOBJECTSIZE);
256 			objectsize = MAXOBJECTSIZE;
257 		}
258 
259 		if (hbuff != NULL) {
260 			if (size_hbuff < objectsize*2+1) {
261 				debuglog("Hbuff not large enough."
262 				    "  Increase size\n");
263 			} else {
264 				for(i=0;i<objectsize;i++) {
265 					sprintf(hbuff+i*2,"%02x",*(object+i));
266 				}
267 				*(hbuff+i*2) = '\0';
268 			}
269 		}
270 
271 		if (cbuff != NULL) {
272 			if (size_cbuff < objectsize+1) {
273 				debuglog("Cbuff not large enough."
274 				    "  Increase size\n");
275 			} else {
276 				for(i=0;i<objectsize;i++) {
277 					if (*(object+i) >= 32 && *(object+i) <= 127) {
278 						*(cbuff+i) = *(object+i);
279 					} else {
280 						*(cbuff+i) = '.';
281 					}
282 				}
283 				*(cbuff+i) = '\0';
284 			}
285 		}
286 	}
287 }
288 
289 void
290 dump_netobj(const struct netobj *nobj)
291 {
292 	char hbuff[MAXBUFFERSIZE*2];
293 	char cbuff[MAXBUFFERSIZE];
294 
295 	if (debug_level < 2) {
296 		return;
297 	}
298 
299 	if (nobj == NULL) {
300 		debuglog("Null netobj pointer\n");
301 	}
302 	else if (nobj->n_len == 0) {
303 		debuglog("Size zero netobj\n");
304 	} else {
305 		dump_static_object(nobj->n_bytes, nobj->n_len,
306 		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
307 		debuglog("netobj: len: %d  data: %s :::  %s\n",
308 		    nobj->n_len, hbuff, cbuff);
309 	}
310 }
311 
312 /* #define DUMP_FILELOCK_VERBOSE */
313 void
314 dump_filelock(const struct file_lock *fl)
315 {
316 #ifdef DUMP_FILELOCK_VERBOSE
317 	char hbuff[MAXBUFFERSIZE*2];
318 	char cbuff[MAXBUFFERSIZE];
319 #endif
320 
321 	if (debug_level < 2) {
322 		return;
323 	}
324 
325 	if (fl != NULL) {
326 		debuglog("Dumping file lock structure @ %p\n", fl);
327 
328 #ifdef DUMP_FILELOCK_VERBOSE
329 		dump_static_object((unsigned char *)&fl->filehandle,
330 		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
331 		    cbuff, sizeof(cbuff));
332 		debuglog("Filehandle: %8s  :::  %8s\n", hbuff, cbuff);
333 #endif
334 
335 		debuglog("Dumping nlm4_holder:\n"
336 		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
337 		    fl->client.exclusive, fl->client.svid,
338 		    fl->client.l_offset, fl->client.l_len);
339 
340 #ifdef DUMP_FILELOCK_VERBOSE
341 		debuglog("Dumping client identity:\n");
342 		dump_netobj(&fl->client.oh);
343 
344 		debuglog("Dumping client cookie:\n");
345 		dump_netobj(&fl->client_cookie);
346 
347 		debuglog("nsm: %d  status: %d  flags: %d  svid: %x"
348 		    "  client_name: %s\n", fl->nsm_status, fl->status,
349 		    fl->flags, fl->client.svid, fl->client_name);
350 #endif
351 	} else {
352 		debuglog("NULL file lock structure\n");
353 	}
354 }
355 
356 void
357 copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
358     const bool_t exclusive, struct nlm4_holder *dest)
359 {
360 
361 	dest->exclusive = exclusive;
362 	dest->oh.n_len = src->oh.n_len;
363 	dest->oh.n_bytes = src->oh.n_bytes;
364 	dest->svid = src->svid;
365 	dest->l_offset = src->l_offset;
366 	dest->l_len = src->l_len;
367 }
368 
369 /*
370  * allocate_file_lock: Create a lock with the given parameters
371  */
372 
373 struct file_lock *
374 allocate_file_lock(const netobj *lockowner, const netobj *matchcookie,
375 		   const struct sockaddr *addr, const char *caller_name)
376 {
377 	struct file_lock *newfl;
378 	size_t n;
379 
380 	/* Beware of rubbish input! */
381 	n = strnlen(caller_name, SM_MAXSTRLEN);
382 	if (n == SM_MAXSTRLEN) {
383 		return NULL;
384 	}
385 
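	/*
	 * client_name is the variable-length tail of struct file_lock, so
	 * allocate only the fixed part of the structure plus enough room
	 * for the caller's name and its terminating NUL.
	 */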
386 	newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
387 	if (newfl == NULL) {
388 		return NULL;
389 	}
390 	bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
391 	memcpy(newfl->client_name, caller_name, n);
392 	newfl->client_name[n] = 0;
393 
394 	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
395 	if (newfl->client.oh.n_bytes == NULL) {
396 		free(newfl);
397 		return NULL;
398 	}
399 	newfl->client.oh.n_len = lockowner->n_len;
400 	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);
401 
402 	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
403 	if (newfl->client_cookie.n_bytes == NULL) {
404 		free(newfl->client.oh.n_bytes);
405 		free(newfl);
406 		return NULL;
407 	}
408 	newfl->client_cookie.n_len = matchcookie->n_len;
409 	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);
410 
411 	newfl->addr = malloc(addr->sa_len);
412 	if (newfl->addr == NULL) {
413 		free(newfl->client_cookie.n_bytes);
414 		free(newfl->client.oh.n_bytes);
415 		free(newfl);
416 		return NULL;
417 	}
418 	memcpy(newfl->addr, addr, addr->sa_len);
419 
420 	return newfl;
421 }
422 
423 /*
424  * fill_file_lock: Force creation of a valid file lock
425  */
426 void
427 fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
428     const bool_t exclusive, const int32_t svid,
429     const u_int64_t offset, const u_int64_t len,
430     const int state, const int status, const int flags, const int blocking)
431 {
432 	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));
433 
434 	fl->client.exclusive = exclusive;
435 	fl->client.svid = svid;
436 	fl->client.l_offset = offset;
437 	fl->client.l_len = len;
438 
439 	fl->nsm_status = state;
440 	fl->status = status;
441 	fl->flags = flags;
442 	fl->blocking = blocking;
443 }
444 
445 /*
446  * deallocate_file_lock: Free all storage associated with a file lock
447  */
448 void
449 deallocate_file_lock(struct file_lock *fl)
450 {
451 	free(fl->addr);
452 	free(fl->client.oh.n_bytes);
453 	free(fl->client_cookie.n_bytes);
454 	free(fl);
455 }
456 
457 /*
458  * regions_overlap(): This function examines the two provided regions for
459  * overlap.
460  */
461 int
462 regions_overlap(const u_int64_t start1, const u_int64_t len1,
463     const u_int64_t start2, const u_int64_t len2)
464 {
465 	u_int64_t d1,d2,d3,d4;
466 	enum split_status result;
467 
468 	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
469 		 start1, len1, start2, len2);
470 
471 	result = region_compare(start1, len1, start2, len2,
472 	    &d1, &d2, &d3, &d4);
473 
474 	debuglog("Exiting region overlap with val: %d\n",result);
475 
476 	if (result == SPL_DISJOINT) {
477 		return 0;
478 	} else {
479 		return 1;
480 	}
481 }
482 
483 /*
484  * region_compare(): Examine lock regions and split appropriately
485  *
486  * XXX: Fix 64 bit overflow problems
487  * XXX: Check to make sure I got *ALL* the cases.
488  * XXX: This DESPERATELY needs a regression test.
489  */
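/*
 * A length of zero is treated as an unbounded region, i.e. a lock or
 * unlock that extends to the end of the file (the usual NLM convention),
 * which is why the branches below describe len == 0 as "infinite".
 *
 * Illustrative example (hypothetical values): unlocking startu=10,
 * lenu=20 out of an established lock with starte=0, lene=100 classifies
 * the edges as LEDGE_INSIDE/REDGE_INSIDE and yields SPL_LOCK1|SPL_LOCK2
 * with remainders start1=0,len1=10 and start2=30,len2=70.
 */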
490 enum split_status
491 region_compare(const u_int64_t starte, const u_int64_t lene,
492     const u_int64_t startu, const u_int64_t lenu, u_int64_t *start1,
493     u_int64_t *len1, u_int64_t *start2, u_int64_t *len2)
494 {
495 	/*
496 	 * Please pay attention to the sequential exclusions
497 	 * of the if statements!!!
498 	 */
499 	enum LFLAGS lflags;
500 	enum RFLAGS rflags;
501 	enum split_status retval;
502 
503 	retval = SPL_DISJOINT;
504 
505 	if (lene == 0 && lenu == 0) {
506 		/* Examine left edge of unlocker */
507 		lflags = LEDGE_INSIDE;
508 		if (startu < starte) {
509 			lflags = LEDGE_LEFT;
510 		} else if (startu == starte) {
511 			lflags = LEDGE_LBOUNDARY;
512 		}
513 
514 		rflags = REDGE_RBOUNDARY; /* Both are infinite */
515 
516 		if (lflags == LEDGE_INSIDE) {
517 			*start1 = starte;
518 			*len1 = startu - starte;
519 		}
520 
521 		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
522 			retval = SPL_CONTAINED;
523 		} else {
524 			retval = SPL_LOCK1;
525 		}
526 	} else if (lene == 0 && lenu != 0) {
527 		/* Established lock is infinite */
528 		/* Examine left edge of unlocker */
529 		lflags = LEDGE_INSIDE;
530 		if (startu < starte) {
531 			lflags = LEDGE_LEFT;
532 		} else if (startu == starte) {
533 			lflags = LEDGE_LBOUNDARY;
534 		}
535 
536 		/* Examine right edge of unlocker */
537 		if (startu + lenu < starte) {
538 			/* Right edge of unlocker left of established lock */
539 			rflags = REDGE_LEFT;
540 			return SPL_DISJOINT;
541 		} else if (startu + lenu == starte) {
542 			/* Right edge of unlocker on start of established lock */
543 			rflags = REDGE_LBOUNDARY;
544 			return SPL_DISJOINT;
545 		} else { /* Infinity is right of finity */
546 			/* Right edge of unlocker inside established lock */
547 			rflags = REDGE_INSIDE;
548 		}
549 
550 		if (lflags == LEDGE_INSIDE) {
551 			*start1 = starte;
552 			*len1 = startu - starte;
553 			retval |= SPL_LOCK1;
554 		}
555 
556 		if (rflags == REDGE_INSIDE) {
557 			/* Create right lock */
558 			*start2 = startu+lenu;
559 			*len2 = 0;
560 			retval |= SPL_LOCK2;
561 		}
562 	} else if (lene != 0 && lenu == 0) {
563 		/* Unlocker is infinite */
564 		/* Examine left edge of unlocker */
565 		lflags = LEDGE_RIGHT;
566 		if (startu < starte) {
567 			lflags = LEDGE_LEFT;
568 			retval = SPL_CONTAINED;
569 			return retval;
570 		} else if (startu == starte) {
571 			lflags = LEDGE_LBOUNDARY;
572 			retval = SPL_CONTAINED;
573 			return retval;
574 		} else if ((startu > starte) && (startu < starte + lene - 1)) {
575 			lflags = LEDGE_INSIDE;
576 		} else if (startu == starte + lene - 1) {
577 			lflags = LEDGE_RBOUNDARY;
578 		} else { /* startu > starte + lene -1 */
579 			lflags = LEDGE_RIGHT;
580 			return SPL_DISJOINT;
581 		}
582 
583 		rflags = REDGE_RIGHT; /* Infinity is right of finity */
584 
585 		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
586 			*start1 = starte;
587 			*len1 = startu - starte;
588 			retval |= SPL_LOCK1;
589 			return retval;
590 		}
591 	} else {
592 		/* Both locks are finite */
593 
594 		/* Examine left edge of unlocker */
595 		lflags = LEDGE_RIGHT;
596 		if (startu < starte) {
597 			lflags = LEDGE_LEFT;
598 		} else if (startu == starte) {
599 			lflags = LEDGE_LBOUNDARY;
600 		} else if ((startu > starte) && (startu < starte + lene - 1)) {
601 			lflags = LEDGE_INSIDE;
602 		} else if (startu == starte + lene - 1) {
603 			lflags = LEDGE_RBOUNDARY;
604 		} else { /* startu > starte + lene -1 */
605 			lflags = LEDGE_RIGHT;
606 			return SPL_DISJOINT;
607 		}
608 
609 		/* Examine right edge of unlocker */
610 		if (startu + lenu < starte) {
611 			/* Right edge of unlocker left of established lock */
612 			rflags = REDGE_LEFT;
613 			return SPL_DISJOINT;
614 		} else if (startu + lenu == starte) {
615 			/* Right edge of unlocker on start of established lock */
616 			rflags = REDGE_LBOUNDARY;
617 			return SPL_DISJOINT;
618 		} else if (startu + lenu < starte + lene) {
619 			/* Right edge of unlocker inside established lock */
620 			rflags = REDGE_INSIDE;
621 		} else if (startu + lenu == starte + lene) {
622 			/* Right edge of unlocker on right edge of established lock */
623 			rflags = REDGE_RBOUNDARY;
624 		} else { /* startu + lenu > starte + lene */
625 			/* Right edge of unlocker is right of established lock */
626 			rflags = REDGE_RIGHT;
627 		}
628 
629 		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
630 			/* Create left lock */
631 			*start1 = starte;
632 			*len1 = (startu - starte);
633 			retval |= SPL_LOCK1;
634 		}
635 
636 		if (rflags == REDGE_INSIDE) {
637 			/* Create right lock */
638 			*start2 = startu+lenu;
639 			*len2 = starte+lene-(startu+lenu);
640 			retval |= SPL_LOCK2;
641 		}
642 
643 		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
644 		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
645 			retval = SPL_CONTAINED;
646 		}
647 	}
648 	return retval;
649 }
650 
651 /*
652  * same_netobj: Compares the appropriate bits of a netobj for identity
653  */
654 int
655 same_netobj(const netobj *n0, const netobj *n1)
656 {
657 	int retval;
658 
659 	retval = 0;
660 
661 	debuglog("Entering netobj identity check\n");
662 
663 	if (n0->n_len == n1->n_len) {
664 		debuglog("Preliminary length check passed\n");
665 		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
666 		debuglog("netobj %smatch\n", retval ? "" : "mis");
667 	}
668 
669 	return (retval);
670 }
671 
672 /*
673  * same_filelock_identity: Compares the appropriate bits of a file_lock
674  */
675 int
676 same_filelock_identity(const struct file_lock *fl0, const struct file_lock *fl1)
677 {
678 	int retval;
679 
680 	retval = 0;
681 
682 	debuglog("Checking filelock identity\n");
683 
684 	/*
685 	 * Check process ids and host information.
686 	 */
687 	retval = (fl0->client.svid == fl1->client.svid &&
688 	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));
689 
690 	debuglog("Exiting checking filelock identity: retval: %d\n",retval);
691 
692 	return (retval);
693 }
694 
695 /*
696  * Below here are routines associated with manipulating the NFS
697  * lock list.
698  */
699 
700 /*
701  * get_lock_matching_unlock: Return a lock which matches the given unlock lock
702  *                           or NULL otherwise
703  * XXX: It is a shame that this duplicates so much code from test_nfslock.
704  */
705 struct file_lock *
706 get_lock_matching_unlock(const struct file_lock *fl)
707 {
708 	struct file_lock *ifl; /* Iterator */
709 
710 	debuglog("Entering get_lock_matching_unlock\n");
711 	debuglog("********Dump of fl*****************\n");
712 	dump_filelock(fl);
713 
714 	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
715 		debuglog("Pointer to file lock: %p\n",ifl);
716 
717 		debuglog("****Dump of ifl****\n");
718 		dump_filelock(ifl);
719 		debuglog("*******************\n");
720 
721 		/*
722 		 * XXX: It is conceivable that someone could use the NLM RPC
723 		 * system to directly access filehandles.  This may be a
724 		 * security hazard as the filehandle code may bypass normal
725 		 * file access controls
726 		 */
727 		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
728 			continue;
729 
730 		debuglog("get_lock_matching_unlock: Filehandles match, "
731 		    "checking regions\n");
732 
733 		/* Filehandles match, check for region overlap */
734 		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
735 			ifl->client.l_offset, ifl->client.l_len))
736 			continue;
737 
738 		debuglog("get_lock_matching_unlock: Region overlap"
739 		    " found %llu : %llu -- %llu : %llu\n",
740 		    fl->client.l_offset,fl->client.l_len,
741 		    ifl->client.l_offset,ifl->client.l_len);
742 
743 		/* Regions overlap, check the identity */
744 		if (!same_filelock_identity(fl,ifl))
745 			continue;
746 
747 		debuglog("get_lock_matching_unlock: Duplicate lock id.  Granting\n");
748 		return (ifl);
749 	}
750 
751 	debuglog("Exiting get_lock_matching_unlock\n");
752 
753 	return (NULL);
754 }
755 
756 /*
757  * test_nfslock: check for NFS lock in lock list
758  *
759  * This routine makes the following assumptions:
760  *    1) Nothing will adjust the lock list during a lookup
761  *
762  * This routine has an interesting quirk which bit me hard.
763  * The conflicting_fl is the pointer to the conflicting lock.
764  * However, to modify the "*pointer* to the conflicting lock" rather
765  * than the "conflicting lock itself", one must pass in a "pointer to
766  * the pointer of the conflicting lock".  Gross.
767  */
768 
769 enum nfslock_status
770 test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
771 {
772 	struct file_lock *ifl; /* Iterator */
773 	enum nfslock_status retval;
774 
775 	debuglog("Entering test_nfslock\n");
776 
777 	retval = NFS_GRANTED;
778 	(*conflicting_fl) = NULL;
779 
780 	debuglog("Entering lock search loop\n");
781 
782 	debuglog("***********************************\n");
783 	debuglog("Dumping match filelock\n");
784 	debuglog("***********************************\n");
785 	dump_filelock(fl);
786 	debuglog("***********************************\n");
787 
788 	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
789 		if (retval == NFS_DENIED)
790 			break;
791 
792 		debuglog("Top of lock loop\n");
793 		debuglog("Pointer to file lock: %p\n",ifl);
794 
795 		debuglog("***********************************\n");
796 		debuglog("Dumping test filelock\n");
797 		debuglog("***********************************\n");
798 		dump_filelock(ifl);
799 		debuglog("***********************************\n");
800 
801 		/*
802 		 * XXX: It is conceivable that someone could use the NLM RPC
803 		 * system to directly access filehandles.  This may be a
804 		 * security hazard as the filehandle code may bypass normal
805 		 * file access controls
806 		 */
807 		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
808 			continue;
809 
810 		debuglog("test_nfslock: filehandle match found\n");
811 
812 		/* Filehandles match, check for region overlap */
813 		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
814 			ifl->client.l_offset, ifl->client.l_len))
815 			continue;
816 
817 		debuglog("test_nfslock: Region overlap found"
818 		    " %llu : %llu -- %llu : %llu\n",
819 		    fl->client.l_offset,fl->client.l_len,
820 		    ifl->client.l_offset,ifl->client.l_len);
821 
822 		/* Regions overlap, check the exclusivity */
823 		if (!(fl->client.exclusive || ifl->client.exclusive))
824 			continue;
825 
826 		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
827 		    fl->client.exclusive,
828 		    ifl->client.exclusive);
829 
830 		if (same_filelock_identity(fl,ifl)) {
831 			debuglog("test_nfslock: Duplicate id.  Granting\n");
832 			(*conflicting_fl) = ifl;
833 			retval = NFS_GRANTED_DUPLICATE;
834 		} else {
835 			/* locking attempt fails */
836 			debuglog("test_nfslock: Lock attempt failed\n");
837 			debuglog("Desired lock\n");
838 			dump_filelock(fl);
839 			debuglog("Conflicting lock\n");
840 			dump_filelock(ifl);
841 			(*conflicting_fl) = ifl;
842 			retval = NFS_DENIED;
843 		}
844 	}
845 
846 	debuglog("Dumping file locks\n");
847 	debuglog("Exiting test_nfslock\n");
848 
849 	return (retval);
850 }
851 
852 /*
853  * lock_nfslock: attempt to create a lock in the NFS lock list
854  *
855  * This routine tests whether the lock will be granted and then adds
856  * the entry to the lock list if so.
857  *
858  * Argument fl gets modified as its list housekeeping entries get modified
859  * upon insertion into the NFS lock list
860  *
861  * This routine makes several assumptions:
862  *    1) It is perfectly happy to grant a duplicate lock from the same pid.
863  *       While this seems to be intuitively wrong, it is required for proper
864  *       Posix semantics during unlock.  It is absolutely imperative to not
865  *       unlock the main lock before the two child locks are established. Thus,
866  *       one has to be able to create duplicate locks over an existing lock
867  *    2) It currently accepts duplicate locks from the same id,pid
868  */
869 
870 enum nfslock_status
871 lock_nfslock(struct file_lock *fl)
872 {
873 	enum nfslock_status retval;
874 	struct file_lock *dummy_fl;
875 
876 	dummy_fl = NULL;
877 
878 	debuglog("Entering lock_nfslock...\n");
879 
880 	retval = test_nfslock(fl,&dummy_fl);
881 
882 	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
883 		debuglog("Inserting lock...\n");
884 		dump_filelock(fl);
885 		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
886 	}
887 
888 	debuglog("Exiting lock_nfslock...\n");
889 
890 	return (retval);
891 }
892 
893 /*
894  * delete_nfslock: delete an NFS lock list entry
895  *
896  * This routine is used to delete a lock out of the NFS lock list
897  * without regard to status, underlying locks, regions or anything else
898  *
899  * Note that this routine *does not deallocate memory* of the lock.
900  * It just disconnects it from the list.  The lock can then be used
901  * by other routines without fear of trashing the list.
902  */
903 
904 enum nfslock_status
905 delete_nfslock(struct file_lock *fl)
906 {
907 
908 	LIST_REMOVE(fl, nfslocklist);
909 
910 	return (NFS_GRANTED);
911 }
912 
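/*
 * split_nfslock: compare an established lock against an unlock request
 * and, when the unlock covers only part of the lock, allocate the left
 * and/or right remainder locks that must survive.  Returns the split
 * status from region_compare(), or SPL_RESERR if a remainder lock could
 * not be allocated.
 */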
913 enum split_status
914 split_nfslock(const struct file_lock *exist_lock,
915     const struct file_lock *unlock_lock, struct file_lock **left_lock,
916     struct file_lock **right_lock)
917 {
918 	u_int64_t start1, len1, start2, len2;
919 	enum split_status spstatus;
920 
921 	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
922 	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
923 	    &start1, &len1, &start2, &len2);
924 
925 	if ((spstatus & SPL_LOCK1) != 0) {
926 		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
927 		if (*left_lock == NULL) {
928 			debuglog("Unable to allocate resource for split 1\n");
929 			return SPL_RESERR;
930 		}
931 
932 		fill_file_lock(*left_lock, &exist_lock->filehandle,
933 		    exist_lock->client.exclusive, exist_lock->client.svid,
934 		    start1, len1,
935 		    exist_lock->nsm_status,
936 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
937 	}
938 
939 	if ((spstatus & SPL_LOCK2) != 0) {
940 		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
941 		if (*right_lock == NULL) {
942 			debuglog("Unable to allocate resource for split 2\n");
943 			if (*left_lock != NULL) {
944 				deallocate_file_lock(*left_lock);
945 			}
946 			return SPL_RESERR;
947 		}
948 
949 		fill_file_lock(*right_lock, &exist_lock->filehandle,
950 		    exist_lock->client.exclusive, exist_lock->client.svid,
951 		    start2, len2,
952 		    exist_lock->nsm_status,
953 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
954 	}
955 
956 	return spstatus;
957 }
958 
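/*
 * unlock_nfslock: find the NFS lock matching an unlock request, insert
 * any left/right remainder locks produced by the split, and then unhook
 * the matched lock from the list, handing it back via *released_lock so
 * the caller can unmonitor the host and free it.  Returns
 * NFS_DENIED_NOLOCK when no matching lock exists.
 */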
959 enum nfslock_status
960 unlock_nfslock(const struct file_lock *fl, struct file_lock **released_lock,
961     struct file_lock **left_lock, struct file_lock **right_lock)
962 {
963 	struct file_lock *mfl; /* Matching file lock */
964 	enum nfslock_status retval;
965 	enum split_status spstatus;
966 
967 	debuglog("Entering unlock_nfslock\n");
968 
969 	*released_lock = NULL;
970 	*left_lock = NULL;
971 	*right_lock = NULL;
972 
973 	retval = NFS_DENIED_NOLOCK;
974 
975 	debuglog("Attempting to match lock...\n");
976 	mfl = get_lock_matching_unlock(fl);
977 
978 	if (mfl != NULL) {
979 		debuglog("Unlock matched.  Querying for split\n");
980 
981 		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);
982 
983 		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
984 		debuglog("********Split dumps********");
985 		dump_filelock(mfl);
986 		dump_filelock(fl);
987 		dump_filelock(*left_lock);
988 		dump_filelock(*right_lock);
989 		debuglog("********End Split dumps********");
990 
991 		if (spstatus == SPL_RESERR) {
992 			if (*left_lock != NULL) {
993 				deallocate_file_lock(*left_lock);
994 				*left_lock = NULL;
995 			}
996 
997 			if (*right_lock != NULL) {
998 				deallocate_file_lock(*right_lock);
999 				*right_lock = NULL;
1000 			}
1001 
1002 			return NFS_RESERR;
1003 		}
1004 
1005 		/* Insert new locks from split if required */
1006 		if (*left_lock != NULL) {
1007 			debuglog("Split left activated\n");
1008 			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
1009 		}
1010 
1011 		if (*right_lock != NULL) {
1012 			debuglog("Split right activated\n");
1013 			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
1014 		}
1015 
1016 		/* Unlock the lock since it matches identity */
1017 		LIST_REMOVE(mfl, nfslocklist);
1018 		*released_lock = mfl;
1019 		retval = NFS_GRANTED;
1020 	}
1021 
1022 	debuglog("Exiting unlock_nfslock\n");
1023 
1024 	return retval;
1025 }
1026 
1027 /*
1028  * Below here are the routines for manipulating the file lock directly
1029  * on the disk hardware itself
1030  */
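/*
 * The "hardware" layer keeps one monfile per filehandle: the file is
 * fhopen()ed once, locked with flock(), and reference counted, so the
 * descriptor (and with it the flock() lock) is dropped only when the
 * last NFS lock on that file goes away.  Since flock() operates on whole
 * files, byte ranges are enforced only by the NFS lock list above.
 */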
1031 enum hwlock_status
1032 lock_hwlock(struct file_lock *fl)
1033 {
1034 	struct monfile *imf,*nmf;
1035 	int lflags, flerror;
1036 
1037 	/* Scan to see if filehandle already present */
1038 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1039 		if (bcmp(&fl->filehandle, &imf->filehandle,
1040 			sizeof(fl->filehandle)) == 0) {
1041 			/* imf is the correct filehandle */
1042 			break;
1043 		}
1044 	}
1045 
1046 	/*
1047 	 * Filehandle already exists (we control the file)
1048 	 * *AND* NFS has already cleared the lock for availability
1049 	 * Grant it and bump the refcount.
1050 	 */
1051 	if (imf != NULL) {
1052 		++(imf->refcount);
1053 		return (HW_GRANTED);
1054 	}
1055 
1056 	/* No filehandle found, create and go */
1057 	nmf = malloc(sizeof(struct monfile));
1058 	if (nmf == NULL) {
1059 		debuglog("hwlock resource allocation failure\n");
1060 		return (HW_RESERR);
1061 	}
1062 
1063 	/* XXX: Is O_RDWR always the correct mode? */
1064 	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
1065 	if (nmf->fd < 0) {
1066 		debuglog("fhopen failed (from %16s): %32s\n",
1067 		    fl->client_name, strerror(errno));
1068 		free(nmf);
1069 		switch (errno) {
1070 		case ESTALE:
1071 			return (HW_STALEFH);
1072 		case EROFS:
1073 			return (HW_READONLY);
1074 		default:
1075 			return (HW_RESERR);
1076 		}
1077 	}
1078 
1079 	/* File opened correctly, fill the monitor struct */
1080 	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
1081 	nmf->refcount = 1;
1082 	nmf->exclusive = fl->client.exclusive;
1083 
1084 	lflags = (nmf->exclusive == 1) ?
1085 	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);
1086 
1087 	flerror = flock(nmf->fd, lflags);
1088 
1089 	if (flerror != 0) {
1090 		debuglog("flock failed (from %16s): %32s\n",
1091 		    fl->client_name, strerror(errno));
1092 		close(nmf->fd);
1093 		free(nmf);
1094 		switch (errno) {
1095 		case EAGAIN:
1096 			return (HW_DENIED);
1097 		case ESTALE:
1098 			return (HW_STALEFH);
1099 		case EROFS:
1100 			return (HW_READONLY);
1101 		default:
1102 			return (HW_RESERR);
1103 			break;
1104 		}
1105 	}
1106 
1107 	/* File opened and locked */
1108 	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);
1109 
1110 	debuglog("flock succeeded (from %16s)\n", fl->client_name);
1111 	return (HW_GRANTED);
1112 }
1113 
1114 enum hwlock_status
1115 unlock_hwlock(const struct file_lock *fl)
1116 {
1117 	struct monfile *imf;
1118 
1119 	debuglog("Entering unlock_hwlock\n");
1120 	debuglog("Entering loop iteration\n");
1121 
1122 	/* Scan to see if filehandle already present */
1123 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1124 		if (bcmp(&fl->filehandle, &imf->filehandle,
1125 			sizeof(fl->filehandle)) == 0) {
1126 			/* imf is the correct filehandle */
1127 			break;
1128 		}
1129 	}
1130 
1131 	debuglog("Completed iteration.  Proceeding\n");
1132 
1133 	if (imf == NULL) {
1134 		/* No lock found */
1135 		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1136 		return (HW_DENIED_NOLOCK);
1137 	}
1138 
1139 	/* Lock found */
1140 	--imf->refcount;
1141 
1142 	if (imf->refcount < 0) {
1143 		debuglog("Negative hardware reference count\n");
1144 	}
1145 
1146 	if (imf->refcount <= 0) {
1147 		close(imf->fd);
1148 		LIST_REMOVE(imf, monfilelist);
1149 		free(imf);
1150 	}
1151 	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1152 	return (HW_GRANTED);
1153 }
1154 
1155 enum hwlock_status
1156 test_hwlock(const struct file_lock *fl __unused,
1157     struct file_lock **conflicting_fl __unused)
1158 {
1159 
1160 	/*
1161 	 * XXX: lock tests on hardware are not required until
1162 	 * true partial file testing is done on the underlying file
1163 	 */
1164 	return (HW_RESERR);
1165 }
1166 
1167 
1168 
1169 /*
1170  * Below here are routines for manipulating blocked lock requests
1171  * They should only be called from the XXX_partialfilelock routines
1172  * if at all possible
1173  */
1174 
1175 int
1176 duplicate_block(struct file_lock *fl)
1177 {
1178 	struct file_lock *ifl;
1179 	int retval = 0;
1180 
1181 	debuglog("Entering duplicate_block\n");
1182 
1183 	/*
1184 	 * Is this lock request already on the blocking list?
1185 	 * Consider it a dupe if the file handles, offset, length,
1186 	 * exclusivity and client match.
1187 	 */
1188 	LIST_FOREACH(ifl, &blockedlocklist_head, nfslocklist) {
1189 		if (!bcmp(&fl->filehandle, &ifl->filehandle,
1190 			sizeof(fhandle_t)) &&
1191 		    fl->client.exclusive == ifl->client.exclusive &&
1192 		    fl->client.l_offset == ifl->client.l_offset &&
1193 		    fl->client.l_len == ifl->client.l_len &&
1194 		    same_filelock_identity(fl, ifl)) {
1195 			retval = 1;
1196 			break;
1197 		}
1198 	}
1199 
1200 	debuglog("Exiting duplicate_block: %s\n", retval ? "already blocked"
1201 	    : "not already blocked");
1202 	return retval;
1203 }
1204 
1205 void
1206 add_blockingfilelock(struct file_lock *fl)
1207 {
1208 	debuglog("Entering add_blockingfilelock\n");
1209 
1210 	/*
1211 	 * A blocking lock request _should_ never be duplicated as a client
1212 	 * that is already blocked shouldn't be able to request another
1213 	 * lock. Alas, there are some buggy clients that do request the same
1214 	 * lock repeatedly. Make sure only unique locks are on the blocked
1215 	 * lock list.
1216 	 */
1217 	if (duplicate_block(fl)) {
1218 		debuglog("Exiting add_blockingfilelock: already blocked\n");
1219 		return;
1220 	}
1221 
1222 	/*
1223 	 * Clear the blocking flag so that it can be reused without
1224 	 * adding it to the blocking queue a second time
1225 	 */
1226 
1227 	fl->blocking = 0;
1228 	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);
1229 
1230 	debuglog("Exiting add_blockingfilelock: added blocked lock\n");
1231 }
1232 
1233 void
1234 remove_blockingfilelock(struct file_lock *fl)
1235 {
1236 
1237 	debuglog("Entering remove_blockingfilelock\n");
1238 
1239 	LIST_REMOVE(fl, nfslocklist);
1240 
1241 	debuglog("Exiting remove_blockingfilelock\n");
1242 }
1243 
1244 void
1245 clear_blockingfilelock(const char *hostname)
1246 {
1247 	struct file_lock *ifl,*nfl;
1248 
1249 	/*
1250 	 * Normally, LIST_FOREACH is called for, but since
1251 	 * the current element *is* the iterator, deleting it
1252 	 * would mess up the iteration.  Thus, a next element
1253 	 * must be used explicitly
1254 	 */
1255 
1256 	ifl = LIST_FIRST(&blockedlocklist_head);
1257 
1258 	while (ifl != NULL) {
1259 		nfl = LIST_NEXT(ifl, nfslocklist);
1260 
1261 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1262 			remove_blockingfilelock(ifl);
1263 			deallocate_file_lock(ifl);
1264 		}
1265 
1266 		ifl = nfl;
1267 	}
1268 }
1269 
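/*
 * retry_blockingfilelocklist: walk the blocked-lock list and retry each
 * entry.  A lock that can now be granted is left on the active lists by
 * lock_partialfilelock() and the client is notified via send_granted();
 * anything still blocked goes back onto the blocked list.  Called from
 * unlock_partialfilelock() once an unlock has released a lock.
 */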
1270 void
1271 retry_blockingfilelocklist(void)
1272 {
1273 	/* Retry all locks in the blocked list */
1274 	struct file_lock *ifl, *nfl; /* Iterator */
1275 	enum partialfilelock_status pflstatus;
1276 
1277 	debuglog("Entering retry_blockingfilelocklist\n");
1278 
1279 	LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
1280 		debuglog("Iterator choice %p\n",ifl);
1281 		debuglog("Next iterator choice %p\n",nfl);
1282 
1283 		/*
1284 		 * SUBTLE BUG: The file_lock must be removed from the
1285 		 * old list so that its list pointers get disconnected
1286 		 * before being allowed to participate in the new list
1287 		 * which will automatically add it in if necessary.
1288 		 */
1289 
1290 		LIST_REMOVE(ifl, nfslocklist);
1291 		pflstatus = lock_partialfilelock(ifl);
1292 
1293 		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1294 			debuglog("Granted blocked lock\n");
1295 			/* lock granted and is now being used */
1296 			send_granted(ifl,0);
1297 		} else {
1298 			/* Reinsert lock back into blocked list */
1299 			debuglog("Replacing blocked lock\n");
1300 			LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
1301 		}
1302 	}
1303 
1304 	debuglog("Exiting retry_blockingfilelocklist\n");
1305 }
1306 
1307 /*
1308  * Below here are routines associated with manipulating all
1309  * aspects of the partial file locking system (list, hardware, etc.)
1310  */
1311 
1312 /*
1313  * Please note that lock monitoring must be done at this level which
1314  * keeps track of *individual* lock requests on lock and unlock
1315  *
1316  * XXX: Split unlocking is going to make the unlock code miserable
1317  */
1318 
1319 /*
1320  * lock_partialfilelock:
1321  *
1322  * Argument fl gets modified as its list housekeeping entries get modified
1323  * upon insertion into the NFS lock list
1324  *
1325  * This routine makes several assumptions:
1326  * 1) It (will) pass locks through to flock to lock the entire underlying file
1327  *     and then parcel out NFS locks if it gets control of the file.
1328  *         This matches the old rpc.lockd file semantics (except where it
1329  *         is now more correct).  It is the safe solution, but will cause
1330  *         overly restrictive blocking if someone is trying to use the
1331  *         underlying files without using NFS.  This appears to be an
1332  *         acceptable tradeoff since most people use standalone NFS servers.
1333  * XXX: The right solution is probably kevent combined with fcntl
1334  *
1335  *    2) Nothing modifies the lock lists between testing and granting
1336  *           I have no idea whether this is a useful assumption or not
1337  */
1338 
1339 enum partialfilelock_status
1340 lock_partialfilelock(struct file_lock *fl)
1341 {
1342 	enum partialfilelock_status retval;
1343 	enum nfslock_status lnlstatus;
1344 	enum hwlock_status hwstatus;
1345 
1346 	debuglog("Entering lock_partialfilelock\n");
1347 
1348 	retval = PFL_DENIED;
1349 
1350 	/*
1351 	 * Execute the NFS lock first, if possible, as it is significantly
1352 	 * easier and less expensive to undo than the filesystem lock
1353 	 */
1354 
1355 	lnlstatus = lock_nfslock(fl);
1356 
1357 	switch (lnlstatus) {
1358 	case NFS_GRANTED:
1359 	case NFS_GRANTED_DUPLICATE:
1360 		/*
1361 		 * At this point, the NFS lock is allocated and active.
1362 		 * Remember to clean it up if the hardware lock fails
1363 		 */
1364 		hwstatus = lock_hwlock(fl);
1365 
1366 		switch (hwstatus) {
1367 		case HW_GRANTED:
1368 		case HW_GRANTED_DUPLICATE:
1369 			debuglog("HW GRANTED\n");
1370 			/*
1371 			 * XXX: Fixme: Check hwstatus for duplicate when
1372 			 * true partial file locking and accounting is
1373 			 * done on the hardware.
1374 			 */
1375 			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
1376 				retval = PFL_GRANTED_DUPLICATE;
1377 			} else {
1378 				retval = PFL_GRANTED;
1379 			}
1380 			monitor_lock_host(fl->client_name);
1381 			break;
1382 		case HW_RESERR:
1383 			debuglog("HW RESERR\n");
1384 			retval = PFL_HWRESERR;
1385 			break;
1386 		case HW_DENIED:
1387 			debuglog("HW DENIED\n");
1388 			retval = PFL_HWDENIED;
1389 			break;
1390 		default:
1391 			debuglog("Unmatched hwstatus %d\n",hwstatus);
1392 			break;
1393 		}
1394 
1395 		if (retval != PFL_GRANTED &&
1396 		    retval != PFL_GRANTED_DUPLICATE) {
1397 			/* Clean up the NFS lock */
1398 			debuglog("Deleting trial NFS lock\n");
1399 			delete_nfslock(fl);
1400 		}
1401 		break;
1402 	case NFS_DENIED:
1403 		retval = PFL_NFSDENIED;
1404 		break;
1405 	case NFS_RESERR:
1406 		retval = PFL_NFSRESERR;
1407 		break;
1408 	default:
1409 		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
1410 		retval = PFL_NFSDENIED_NOLOCK;
1411 		break;
1412 	}
1413 
1414 	/*
1415 	 * By the time fl reaches here, it is completely free again on
1416 	 * failure.  The NFS lock done before attempting the
1417 	 * hardware lock has been backed out
1418 	 */
1419 
1420 	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
1421 		/* One last chance to check the lock */
1422 		if (fl->blocking == 1) {
1423 			if (retval == PFL_NFSDENIED) {
1424 				/* Queue the lock */
1425 				debuglog("BLOCKING LOCK RECEIVED\n");
1426 				retval = PFL_NFSBLOCKED;
1427 				add_blockingfilelock(fl);
1428 				dump_filelock(fl);
1429 			} else {
1430 				/* retval is okay as PFL_HWDENIED */
1431 				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
1432 				dump_filelock(fl);
1433 			}
1434 		} else {
1435 			/* Leave retval alone, it's already correct */
1436 			debuglog("Lock denied.  Non-blocking failure\n");
1437 			dump_filelock(fl);
1438 		}
1439 	}
1440 
1441 	debuglog("Exiting lock_partialfilelock\n");
1442 
1443 	return retval;
1444 }
1445 
1446 /*
1447  * unlock_partialfilelock:
1448  *
1449  * Given a file_lock, unlock all locks which match.
1450  *
1451  * Note that a given lock might have to unlock ITSELF!  See
1452  * clear_partialfilelock for example.
1453  */
1454 
1455 enum partialfilelock_status
1456 unlock_partialfilelock(const struct file_lock *fl)
1457 {
1458 	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
1459 	enum partialfilelock_status retval;
1460 	enum nfslock_status unlstatus;
1461 	enum hwlock_status unlhwstatus, lhwstatus;
1462 
1463 	debuglog("Entering unlock_partialfilelock\n");
1464 
1465 	selffl = NULL;
1466 	lfl = NULL;
1467 	rfl = NULL;
1468 	releasedfl = NULL;
1469 	retval = PFL_DENIED;
1470 
1471 	/*
1472 	 * There are significant overlap and atomicity issues
1473 	 * with partially releasing a lock.  For example, releasing
1474 	 * part of an NFS shared lock does *not* always release the
1475 	 * corresponding part of the file since there is only one
1476 	 * rpc.lockd UID but multiple users could be requesting it
1477 	 * from NFS.  Also, an unlock request should never allow
1478 	 * another process to gain a lock on the remaining parts.
1479 	 * ie. Always apply the new locks before releasing the
1480 	 * old one
1481 	 */
1482 
1483 	/*
1484 	 * Loop is required since multiple little locks
1485 	 * can be allocated and then deallocated with one
1486 	 * big unlock.
1487 	 *
1488 	 * The loop is required to be here so that the nfs &
1489 	 * hw subsystems do not need to communicate with
1490 	 * one another
1491 	 */
1492 
1493 	do {
1494 		debuglog("Value of releasedfl: %p\n",releasedfl);
1495 		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
1496 		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
1497 		debuglog("Value of releasedfl: %p\n",releasedfl);
1498 
1499 
1500 		/* XXX: This is grungy.  It should be refactored to be cleaner */
1501 		if (lfl != NULL) {
1502 			lhwstatus = lock_hwlock(lfl);
1503 			if (lhwstatus != HW_GRANTED &&
1504 			    lhwstatus != HW_GRANTED_DUPLICATE) {
1505 				debuglog("HW duplicate lock failure for left split\n");
1506 			}
1507 			monitor_lock_host(lfl->client_name);
1508 		}
1509 
1510 		if (rfl != NULL) {
1511 			lhwstatus = lock_hwlock(rfl);
1512 			if (lhwstatus != HW_GRANTED &&
1513 			    lhwstatus != HW_GRANTED_DUPLICATE) {
1514 				debuglog("HW duplicate lock failure for right split\n");
1515 			}
1516 			monitor_lock_host(rfl->client_name);
1517 		}
1518 
1519 		switch (unlstatus) {
1520 		case NFS_GRANTED:
1521 			/* Attempt to unlock on the hardware */
1522 			debuglog("NFS unlock granted.  Attempting hardware unlock\n");
1523 
1524 			/* This call *MUST NOT* unlock the two newly allocated locks */
1525 			unlhwstatus = unlock_hwlock(fl);
1526 			debuglog("HW unlock returned with code %d\n",unlhwstatus);
1527 
1528 			switch (unlhwstatus) {
1529 			case HW_GRANTED:
1530 				debuglog("HW unlock granted\n");
1531 				unmonitor_lock_host(releasedfl->client_name);
1532 				retval = PFL_GRANTED;
1533 				break;
1534 			case HW_DENIED_NOLOCK:
1535 				/* Huh?!?!  This shouldn't happen */
1536 				debuglog("HW unlock denied no lock\n");
1537 				retval = PFL_HWRESERR;
1538 				/* Break out of do-while */
1539 				unlstatus = NFS_RESERR;
1540 				break;
1541 			default:
1542 				debuglog("HW unlock failed\n");
1543 				retval = PFL_HWRESERR;
1544 				/* Break out of do-while */
1545 				unlstatus = NFS_RESERR;
1546 				break;
1547 			}
1548 
1549 			debuglog("Exiting with status retval: %d\n",retval);
1550 
1551 			retry_blockingfilelocklist();
1552 			break;
1553 		case NFS_DENIED_NOLOCK:
1554 			retval = PFL_GRANTED;
1555 			debuglog("All locks cleaned out\n");
1556 			break;
1557 		default:
1558 			retval = PFL_NFSRESERR;
1559 			debuglog("NFS unlock failure\n");
1560 			dump_filelock(fl);
1561 			break;
1562 		}
1563 
1564 		if (releasedfl != NULL) {
1565 			if (fl == releasedfl) {
1566 				/*
1567 				 * XXX: YECHHH!!! Attempt to unlock self succeeded
1568 				 * but we can't deallocate the space yet.  This is what
1569 				 * happens when you don't write malloc and free together
1570 				 */
1571 				debuglog("Attempt to unlock self\n");
1572 				selffl = releasedfl;
1573 			} else {
1574 				/*
1575 				 * XXX: this deallocation *still* needs to migrate closer
1576 				 * to the allocation code way up in get_lock or the allocation
1577 				 * code needs to migrate down (violation of "When you write
1578 				 * malloc you must write free")
1579 				 */
1580 
1581 				deallocate_file_lock(releasedfl);
1582 				releasedfl = NULL;
1583 			}
1584 		}
1585 
1586 	} while (unlstatus == NFS_GRANTED);
1587 
1588 	if (selffl != NULL) {
1589 		/*
1590 		 * This statement wipes out the incoming file lock (fl)
1591 		 * in spite of the fact that it is declared const
1592 		 */
1593 		debuglog("WARNING!  Destroying incoming lock pointer\n");
1594 		deallocate_file_lock(selffl);
1595 	}
1596 
1597 	debuglog("Exiting unlock_partialfilelock\n");
1598 
1599 	return retval;
1600 }
1601 
1602 /*
1603  * clear_partialfilelock
1604  *
1605  * Normally called in response to statd state number change.
1606  * Wipe out all locks held by a host.  As a bonus, the act of
1607  * doing so should automatically clear their statd entries and
1608  * unmonitor the host.
1609  */
1610 
1611 void
1612 clear_partialfilelock(const char *hostname)
1613 {
1614 	struct file_lock *ifl, *nfl;
1615 
1616 	/* Clear blocking file lock list */
1617 	clear_blockingfilelock(hostname);
1618 
1619 	/* do all required unlocks */
1620 	/* Note that unlock can smash the current pointer to a lock */
1621 
1622 	/*
1623 	 * Normally, LIST_FOREACH is called for, but since
1624 	 * the current element *is* the iterator, deleting it
1625 	 * would mess up the iteration.  Thus, a next element
1626 	 * must be used explicitly
1627 	 */
1628 
1629 	ifl = LIST_FIRST(&nfslocklist_head);
1630 
1631 	while (ifl != NULL) {
1632 		nfl = LIST_NEXT(ifl, nfslocklist);
1633 
1634 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1635 			/* Unlock destroys ifl out from underneath */
1636 			unlock_partialfilelock(ifl);
1637 			/* ifl is NO LONGER VALID AT THIS POINT */
1638 		}
1639 		ifl = nfl;
1640 	}
1641 }
1642 
1643 /*
1644  * test_partialfilelock:
1645  */
1646 enum partialfilelock_status
1647 test_partialfilelock(const struct file_lock *fl,
1648     struct file_lock **conflicting_fl)
1649 {
1650 	enum partialfilelock_status retval;
1651 	enum nfslock_status teststatus;
1652 
1653 	debuglog("Entering testpartialfilelock...\n");
1654 
1655 	retval = PFL_DENIED;
1656 
1657 	teststatus = test_nfslock(fl, conflicting_fl);
1658 	debuglog("test_partialfilelock: teststatus %d\n",teststatus);
1659 
1660 	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
1661 		/* XXX: Add the underlying filesystem locking code */
1662 		retval = (teststatus == NFS_GRANTED) ?
1663 		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
1664 		debuglog("Dumping locks...\n");
1665 		dump_filelock(fl);
1666 		dump_filelock(*conflicting_fl);
1667 		debuglog("Done dumping locks...\n");
1668 	} else {
1669 		retval = PFL_NFSDENIED;
1670 		debuglog("NFS test denied.\n");
1671 		dump_filelock(fl);
1672 		debuglog("Conflicting.\n");
1673 		dump_filelock(*conflicting_fl);
1674 	}
1675 
1676 	debuglog("Exiting testpartialfilelock...\n");
1677 
1678 	return retval;
1679 }
1680 
1681 /*
1682  * Below here are routines associated with translating the partial file locking
1683  * codes into useful codes to send back to the NFS RPC messaging system
1684  */
1685 
1686 /*
1687  * These routines translate the (relatively) useful return codes back onto
1688  * the few return codes which the nlm subsystem wishes to transmit
1689  */
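/*
 * In each translation below the LOCK_V4 flag on the file_lock selects
 * between the NLM version 4 status codes (nlm4_*) and the older v1/v3
 * codes (nlm_*).
 */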
1690 
1691 enum nlm_stats
1692 do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1693 {
1694 	enum partialfilelock_status pfsret;
1695 	enum nlm_stats retval;
1696 
1697 	debuglog("Entering do_test...\n");
1698 
1699 	pfsret = test_partialfilelock(fl,conflicting_fl);
1700 
1701 	switch (pfsret) {
1702 	case PFL_GRANTED:
1703 		debuglog("PFL test lock granted\n");
1704 		dump_filelock(fl);
1705 		dump_filelock(*conflicting_fl);
1706 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1707 		break;
1708 	case PFL_GRANTED_DUPLICATE:
1709 		debuglog("PFL test lock granted--duplicate id detected\n");
1710 		dump_filelock(fl);
1711 		dump_filelock(*conflicting_fl);
1712 		debuglog("Clearing conflicting_fl for call semantics\n");
1713 		*conflicting_fl = NULL;
1714 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1715 		break;
1716 	case PFL_NFSDENIED:
1717 	case PFL_HWDENIED:
1718 		debuglog("PFL test lock denied\n");
1719 		dump_filelock(fl);
1720 		dump_filelock(*conflicting_fl);
1721 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1722 		break;
1723 	case PFL_NFSRESERR:
1724 	case PFL_HWRESERR:
1725 		debuglog("PFL test lock resource fail\n");
1726 		dump_filelock(fl);
1727 		dump_filelock(*conflicting_fl);
1728 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1729 		break;
1730 	default:
1731 		debuglog("PFL test lock *FAILED*\n");
1732 		dump_filelock(fl);
1733 		dump_filelock(*conflicting_fl);
1734 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1735 		break;
1736 	}
1737 
1738 	debuglog("Exiting do_test...\n");
1739 
1740 	return retval;
1741 }
1742 
1743 /*
1744  * do_lock: Try to acquire a lock
1745  *
1746  * This routine makes a distinction between NLM versions.  I am pretty
1747  * convinced that this should be abstracted out and bounced up a level
1748  */
1749 
1750 enum nlm_stats
1751 do_lock(struct file_lock *fl)
1752 {
1753 	enum partialfilelock_status pfsret;
1754 	enum nlm_stats retval;
1755 
1756 	debuglog("Entering do_lock...\n");
1757 
1758 	pfsret = lock_partialfilelock(fl);
1759 
1760 	switch (pfsret) {
1761 	case PFL_GRANTED:
1762 		debuglog("PFL lock granted");
1763 		dump_filelock(fl);
1764 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1765 		break;
1766 	case PFL_GRANTED_DUPLICATE:
1767 		debuglog("PFL lock granted--duplicate id detected");
1768 		dump_filelock(fl);
1769 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1770 		break;
1771 	case PFL_NFSDENIED:
1772 	case PFL_HWDENIED:
1773 		debuglog("PFL_NFS lock denied");
1774 		dump_filelock(fl);
1775 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1776 		break;
1777 	case PFL_NFSBLOCKED:
1778 	case PFL_HWBLOCKED:
1779 		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
1780 		dump_filelock(fl);
1781 		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1782 		break;
1783 	case PFL_NFSRESERR:
1784 	case PFL_HWRESERR:
1785 		debuglog("PFL lock resource allocation fail\n");
1786 		dump_filelock(fl);
1787 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1788 		break;
1789 	default:
1790 		debuglog("PFL lock *FAILED*");
1791 		dump_filelock(fl);
1792 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1793 		break;
1794 	}
1795 
1796 	debuglog("Exiting do_lock...\n");
1797 
1798 	return retval;
1799 }
1800 
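/*
 * do_unlock: release a lock.  Note that the "no lock found" cases below are
 * reported back as granted; unlocking a lock which is not held is treated
 * as success.
 */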
1801 enum nlm_stats
1802 do_unlock(struct file_lock *fl)
1803 {
1804 	enum partialfilelock_status pfsret;
1805 	enum nlm_stats retval;
1806 
1807 	debuglog("Entering do_unlock...\n");
1808 	pfsret = unlock_partialfilelock(fl);
1809 
1810 	switch (pfsret) {
1811 	case PFL_GRANTED:
1812 		debuglog("PFL unlock granted");
1813 		dump_filelock(fl);
1814 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1815 		break;
1816 	case PFL_NFSDENIED:
1817 	case PFL_HWDENIED:
1818 		debuglog("PFL_NFS unlock denied");
1819 		dump_filelock(fl);
1820 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1821 		break;
1822 	case PFL_NFSDENIED_NOLOCK:
1823 	case PFL_HWDENIED_NOLOCK:
1824 		debuglog("PFL_NFS no lock found\n");
1825 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1826 		break;
1827 	case PFL_NFSRESERR:
1828 	case PFL_HWRESERR:
1829 		debuglog("PFL unlock resource failure");
1830 		dump_filelock(fl);
1831 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1832 		break;
1833 	default:
1834 		debuglog("PFL unlock *FAILED*");
1835 		dump_filelock(fl);
1836 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1837 		break;
1838 	}
1839 
1840 	debuglog("Exiting do_unlock...\n");
1841 
1842 	return retval;
1843 }
1844 
1845 /*
1846  * do_clear
1847  *
1848  * This routine is a trivial wrapper because it doesn't have a return
1849  * code.  It is here for completeness in case someone *does* need to add
1850  * return codes later.  A decent compiler should optimize this away.
1851  */
1852 
1853 void
1854 do_clear(const char *hostname)
1855 {
1856 
1857 	clear_partialfilelock(hostname);
1858 }
1859 
1860 /*
1861  * The following routines are all called from the code which the
1862  * RPC layer invokes
1863  */
1864 
1865 /*
1866  * testlock(): inform the caller if the requested lock would be granted
1867  *
1868  * returns NULL if the lock would be granted
1869  * returns pointer to a conflicting nlm4_holder if not
1870  */
1871 
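/*
 * test_fl below is a throwaway probe built on the stack from the request;
 * do_test() sets conflicting_fl to the conflicting lock (if any), and the
 * nlm4_holder embedded in that lock is what gets handed back to the RPC
 * layer.
 */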
1872 struct nlm4_holder *
1873 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
1874 {
1875 	struct file_lock test_fl, *conflicting_fl;
1876 
1877 	bzero(&test_fl, sizeof(test_fl));
1878 
1879 	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
1880 	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
1881 
1882 	siglock();
1883 	do_test(&test_fl, &conflicting_fl);
1884 
1885 	if (conflicting_fl == NULL) {
1886 		debuglog("No conflicting lock found\n");
1887 		sigunlock();
1888 		return NULL;
1889 	} else {
1890 		debuglog("Found conflicting lock\n");
1891 		dump_filelock(conflicting_fl);
1892 		sigunlock();
1893 		return (&conflicting_fl->client);
1894 	}
1895 }
1896 
1897 /*
1898  * getlock: try to acquire the lock.
1899  * If the file is already locked and we can sleep, put the lock in the
1900  * list with status LKST_WAITING; it'll be processed later.
1901  * Otherwise try to lock.  If we're allowed to block, fork a child which
1902  * will do the blocking lock.
1903  */
1904 
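/*
 * The flow below: refuse non-reclaim requests while the grace period is
 * still in effect, allocate and fill a file_lock from the RPC arguments,
 * attempt the lock via do_lock() with SIGCHLD blocked, and free the new
 * entry again unless the lock was granted or queued (blocked).
 */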
1905 enum nlm_stats
1906 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1907 {
1908 	struct file_lock *newfl;
1909 	enum nlm_stats retval;
1910 
1911 	debuglog("Entering getlock...\n");
1912 
1913 	if (grace_expired == 0 && lckarg->reclaim == 0)
1914 		return (flags & LOCK_V4) ?
1915 		    nlm4_denied_grace_period : nlm_denied_grace_period;
1916 
1917 	/* allocate new file_lock for this request */
1918 	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie,
1919 				   (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf, lckarg->alock.caller_name);
1920 	if (newfl == NULL) {
1921 		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
1922 		/* failed */
1923 		return (flags & LOCK_V4) ?
1924 		    nlm4_denied_nolocks : nlm_denied_nolocks;
1925 	}
1926 
1927 	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
1928 		debuglog("received fhandle size %d, local size %d",
1929 		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
1930 	}
1931 
1932 	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
1933 	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
1934 	    lckarg->alock.l_len,
1935 	    lckarg->state, 0, flags, lckarg->block);
1936 
1937 	/*
1938 	 * newfl is now fully constructed and deallocate_file_lock
1939 	 * can now be used to delete it
1940 	 */
1941 
1942 	siglock();
1943 	debuglog("Pointer to new lock is %p\n",newfl);
1944 
1945 	retval = do_lock(newfl);
1946 
1947 	debuglog("Pointer to new lock is %p\n",newfl);
1948 	sigunlock();
1949 
1950 	switch (retval)
1951 	{
1952 	case nlm4_granted:
1953 		/* case nlm_granted: is the same as nlm4_granted */
1954 		/* do_mon(lckarg->alock.caller_name); */
1955 		break;
1956 	case nlm4_blocked:
1957 		/* case nlm_blocked: is the same as nlm4_blocked */
1958 		/* do_mon(lckarg->alock.caller_name); */
1959 		break;
1960 	default:
1961 		deallocate_file_lock(newfl);
1962 		break;
1963 	}
1964 
1965 	debuglog("Exiting getlock...\n");
1966 
1967 	return retval;
1968 }
1969 
1970 
1971 /* unlock a filehandle */
1972 enum nlm_stats
1973 unlock(nlm4_lock *lock, const int flags __unused)
1974 {
1975 	struct file_lock fl;
1976 	enum nlm_stats err;
1977 
1978 	siglock();
1979 
1980 	debuglog("Entering unlock...\n");
1981 
1982 	bzero(&fl,sizeof(struct file_lock));
1983 	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));
1984 
1985 	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
1986 
1987 	err = do_unlock(&fl);
1988 
1989 	sigunlock();
1990 
1991 	debuglog("Exiting unlock...\n");
1992 
1993 	return err;
1994 }
1995 
1996 /*
1997  * XXX: The following monitor/unmonitor routines
1998  * have not been extensively tested (i.e., no regression
1999  * script exists for them as it does for the locking sections).
2000  */
2001 
2002 /*
2003  * monitor_lock_host: monitor lock hosts locally with a ref count and
2004  * inform statd
2005  */
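/*
 * The registration is an SM_MON RPC to the statd on localhost; the my_id
 * fields below tell statd to call us back via NLM_SM_NOTIFY on
 * NLM_PROG/NLM_SM when the monitored host's status changes.  The new host
 * entry is kept on hostlst_head only if statd accepts the registration.
 */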
2006 void
2007 monitor_lock_host(const char *hostname)
2008 {
2009 	struct host *ihp, *nhp;
2010 	struct mon smon;
2011 	struct sm_stat_res sres;
2012 	int rpcret, statflag;
2013 	size_t n;
2014 
2015 	rpcret = 0;
2016 	statflag = 0;
2017 
2018 	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
2019 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2020 			/* Host is already monitored, bump refcount */
2021 			++ihp->refcnt;
2022 			/* Host should only be in the monitor list once */
2023 			return;
2024 		}
2025 	}
2026 
2027 	/* Host is not yet monitored, add it */
2028 	n = strnlen(hostname, SM_MAXSTRLEN);
2029 	if (n == SM_MAXSTRLEN) {
2030 		return;
2031 	}
2032 	nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
2033 	if (nhp == NULL) {
2034 		debuglog("Unable to allocate entry for statd mon\n");
2035 		return;
2036 	}
2037 
2038 	/* Allocated new host entry, now fill the fields */
2039 	memcpy(nhp->name, hostname, n);
2040 	nhp->name[n] = 0;
2041 	nhp->refcnt = 1;
2042 	debuglog("Locally Monitoring host %16s\n",hostname);
2043 
2044 	debuglog("Attempting to tell statd\n");
2045 
2046 	bzero(&smon,sizeof(smon));
2047 
2048 	smon.mon_id.mon_name = nhp->name;
2049 	smon.mon_id.my_id.my_name = "localhost";
2050 	smon.mon_id.my_id.my_prog = NLM_PROG;
2051 	smon.mon_id.my_id.my_vers = NLM_SM;
2052 	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2053 
2054 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
2055 	    (xdrproc_t)xdr_mon, &smon,
2056 	    (xdrproc_t)xdr_sm_stat_res, &sres);
2057 
2058 	if (rpcret == 0) {
2059 		if (sres.res_stat == stat_fail) {
2060 			debuglog("Statd call failed\n");
2061 			statflag = 0;
2062 		} else {
2063 			statflag = 1;
2064 		}
2065 	} else {
2066 		debuglog("Rpc call to statd failed with return value: %d\n",
2067 		    rpcret);
2068 		statflag = 0;
2069 	}
2070 
2071 	if (statflag == 1) {
2072 		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2073 	} else {
2074 		free(nhp);
2075 	}
2076 
2077 }
2078 
2079 /*
2080  * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2081  */
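/*
 * The refcount taken in monitor_lock_host() is dropped here; only when it
 * reaches zero is the SM_UNMON RPC issued and the host entry removed from
 * hostlst_head and freed.
 */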
2082 void
2083 unmonitor_lock_host(char *hostname)
2084 {
2085 	struct host *ihp;
2086 	struct mon_id smon_id;
2087 	struct sm_stat smstat;
2088 	int rpcret;
2089 
2090 	rpcret = 0;
2091 
2092 	for (ihp = LIST_FIRST(&hostlst_head); ihp != NULL;
2093 	     ihp = LIST_NEXT(ihp, hostlst)) {
2094 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2095 			/* Host is monitored, drop the refcount */
2096 			--ihp->refcnt;
2097 			/* Host should only be in the monitor list once */
2098 			break;
2099 		}
2100 	}
2101 
2102 	if (ihp == NULL) {
2103 		debuglog("Could not find host %16s in mon list\n", hostname);
2104 		return;
2105 	}
2106 
2107 	if (ihp->refcnt > 0)
2108 		return;
2109 
2110 	if (ihp->refcnt < 0) {
2111 		debuglog("Negative refcount!: %d\n",
2112 		    ihp->refcnt);
2113 	}
2114 
2115 	debuglog("Attempting to unmonitor host %16s\n", hostname);
2116 
2117 	bzero(&smon_id,sizeof(smon_id));
2118 
2119 	smon_id.mon_name = hostname;
2120 	smon_id.my_id.my_name = "localhost";
2121 	smon_id.my_id.my_prog = NLM_PROG;
2122 	smon_id.my_id.my_vers = NLM_SM;
2123 	smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2124 
2125 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
2126 	    (xdrproc_t)xdr_mon_id, &smon_id,
2127 	    (xdrproc_t)xdr_sm_stat, &smstat);
2128 
2129 	if (rpcret != 0) {
2130 		debuglog("Rpc call to unmonitor statd failed with "
2131 		    "return value: %d\n", rpcret);
2132 	}
2133 
2134 	LIST_REMOVE(ihp, hostlst);
2135 	free(ihp);
2136 }
2137 
2138 /*
2139  * notify: Clear all locks from a host if statd complains
2140  *
2141  * XXX: This routine has not been thoroughly tested.  However, neither
2142  * had the old one been.  It used to compare the statd crash state counter
2143  * to the current lock state.  The upshot of this was that it basically
2144  * cleared all locks from the specified host 99% of the time (with the
2145  * other 1% being a bug).  Consequently, the assumption is that clearing
2146  * all locks from a host when notified by statd is acceptable.
2147  *
2148  * Please note that this routine skips the usual level of redirection
2149  * through a do_* type routine.  This introduces a possible level of
2150  * error and might be better written as a do_notify routine that
2151  * replaces this one.
2152  */
2153 
2154 void
2155 notify(const char *hostname, const int state)
2156 {
2157 	debuglog("notify from %s, new state %d", hostname, state);
2158 
2159 	siglock();
2160 	do_clear(hostname);
2161 	sigunlock();
2162 
2163 	debuglog("Leaving notify\n");
2164 }
2165 
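/*
 * send_granted: notify a client that its blocked lock request has now been
 * granted.  The reply uses whichever NLM version the client spoke;
 * LOCK_ASYNC requests get the one-way *_GRANTED_MSG procedure (no reply
 * awaited), everything else gets a synchronous *_GRANTED call with a
 * 0.5 second timeout.
 */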
2166 void
2167 send_granted(struct file_lock *fl, int opcode __unused)
2168 {
2169 	CLIENT *cli;
2170 	static char dummy;
2171 	struct timeval timeo;
2172 	int success;
2173 	static struct nlm_res retval;
2174 	static struct nlm4_res retval4;
2175 
2176 	debuglog("About to send granted on blocked lock\n");
2177 
2178 	cli = get_client(fl->addr,
2179 	    (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2180 	if (cli == NULL) {
2181 		syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2182 		    fl->client_name);
2183 		/*
2184 		 * We failed to notify the remote host that the lock has been
2185 		 * granted.  The client will time out and retry, and the lock
2186 		 * will be granted at that point.
2187 		 */
2188 		return;
2189 	}
2190 	timeo.tv_sec = 0;
2191 	timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2192 
2193 	if (fl->flags & LOCK_V4) {
2194 		static nlm4_testargs res;
2195 		res.cookie = fl->client_cookie;
2196 		res.exclusive = fl->client.exclusive;
2197 		res.alock.caller_name = fl->client_name;
2198 		res.alock.fh.n_len = sizeof(fhandle_t);
2199 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2200 		res.alock.oh = fl->client.oh;
2201 		res.alock.svid = fl->client.svid;
2202 		res.alock.l_offset = fl->client.l_offset;
2203 		res.alock.l_len = fl->client.l_len;
2204 		debuglog("sending v4 reply%s",
2205 			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2206 		if (fl->flags & LOCK_ASYNC) {
2207 			success = clnt_call(cli, NLM4_GRANTED_MSG,
2208 			    (xdrproc_t)xdr_nlm4_testargs, &res,
2209 			    (xdrproc_t)xdr_void, &dummy, timeo);
2210 		} else {
2211 			success = clnt_call(cli, NLM4_GRANTED,
2212 			    (xdrproc_t)xdr_nlm4_testargs, &res,
2213 			    (xdrproc_t)xdr_nlm4_res, &retval4, timeo);
2214 		}
2215 	} else {
2216 		static nlm_testargs res;
2217 
2218 		res.cookie = fl->client_cookie;
2219 		res.exclusive = fl->client.exclusive;
2220 		res.alock.caller_name = fl->client_name;
2221 		res.alock.fh.n_len = sizeof(fhandle_t);
2222 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2223 		res.alock.oh = fl->client.oh;
2224 		res.alock.svid = fl->client.svid;
2225 		res.alock.l_offset = fl->client.l_offset;
2226 		res.alock.l_len = fl->client.l_len;
2227 		debuglog("sending v1 reply%s",
2228 			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2229 		if (fl->flags & LOCK_ASYNC) {
2230 			success = clnt_call(cli, NLM_GRANTED_MSG,
2231 			    (xdrproc_t)xdr_nlm_testargs, &res,
2232 			    (xdrproc_t)xdr_void, &dummy, timeo);
2233 		} else {
2234 			success = clnt_call(cli, NLM_GRANTED,
2235 			    (xdrproc_t)xdr_nlm_testargs, &res,
2236 			    (xdrproc_t)xdr_nlm_res, &retval, timeo);
2237 		}
2238 	}
2239 	if (debug_level > 2)
2240 		debuglog("clnt_call returns %d(%s) for granted",
2241 			 success, clnt_sperrno(success));
2242 
2243 }
2244 
2245 /*
2246  * Routines below here have not been modified in the overhaul
2247  */
2248 
2249 /*
2250  * Are these two routines still required since lockd is not spawning off
2251  * children to service locks anymore?  Presumably they were originally
2252  * put in place to prevent one child from changing the lock list out
2253  * from under another one.
2254  */
2255 
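/*
 * They simply block and unblock SIGCHLD around manipulation of the lock
 * lists.  The callers above follow the pattern
 *
 *	siglock();
 *	... examine or modify the lock lists ...
 *	sigunlock();
 *
 * presumably so that a SIGCHLD handler cannot run halfway through an
 * update and see the lists in an inconsistent state.
 */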
2256 void
2257 siglock(void)
2258 {
2259 	sigset_t block;
2260 
2261 	sigemptyset(&block);
2262 	sigaddset(&block, SIGCHLD);
2263 
2264 	if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2265 		syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2266 	}
2267 }
2268 
2269 void
2270 sigunlock(void)
2271 {
2272 	sigset_t block;
2273 
2274 	sigemptyset(&block);
2275 	sigaddset(&block, SIGCHLD);
2276 
2277 	if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2278 		syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2279 	}
2280 }
2281