/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to mind is
 * support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for
 * locks. On entry to zfs_range_lock() an rl_t is allocated; the tree
 * is searched and no overlap is found, so *this* rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The AVL code only allows one node at a particular offset. Also it's very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very first entry in the ordered list might be at offset 0 but
 * cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks created for the non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's handle)
 * and its offset and length are used when releasing the lock.
 *
 * Thread coordination
 * -------------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, the lock type is converted from RL_APPEND to
 * RL_WRITER, and the range locked.
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
 */
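/*
 * Illustrative usage sketch (the znode, offset and length below are
 * hypothetical; real callers such as the ZPL read/write paths supply
 * their own):
 *
 *	rl_t *rl;
 *
 *	rl = zfs_range_lock(zp, off, len, RL_WRITER);
 *	... modify the file contents covered by [off, off + len) ...
 *	zfs_range_unlock(rl);
 */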

#include <sys/zfs_rlock.h>

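/*
 * Take a hold on an rl_t so that it cannot be freed while a waiter
 * sleeps on one of its condition variables.  The znode's z_range_lock
 * must be held.
 */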
static int
zfs_range_lock_hold(rl_t *rl)
{

        KASSERT(rl->r_zp != NULL);
        KASSERT(0 < rl->r_refcnt);
        KASSERT(mutex_owned(&rl->r_zp->z_range_lock));

        if (rl->r_refcnt >= ULONG_MAX)
                return (ENFILE);        /* XXX What to do? */

        rl->r_refcnt++;
        return (0);
}

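/*
 * Release a hold on an rl_t.  The last hold destroys the condition
 * variables and frees the structure.  The znode's z_range_lock
 * must be held.
 */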
static void
zfs_range_lock_rele(rl_t *rl)
{

        KASSERT(rl->r_zp != NULL);
        KASSERT(0 < rl->r_refcnt);
        KASSERT(mutex_owned(&rl->r_zp->z_range_lock));

        if (--rl->r_refcnt == 0) {
                cv_destroy(&rl->r_wr_cv);
                cv_destroy(&rl->r_rd_cv);
                kmem_free(rl, sizeof (rl_t));
        }
}

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
        avl_tree_t *tree = &zp->z_range_avl;
        rl_t *rl;
        avl_index_t where;
        uint64_t end_size;
        uint64_t off = new->r_off;
        uint64_t len = new->r_len;

        for (;;) {
                /*
                 * Range locking is also used by zvol and uses a
                 * dummied up znode. However, for zvol, we don't need to
                 * append or grow blocksize, and besides we don't have
                 * a z_phys or z_zfsvfs - so skip that processing.
                 *
                 * Yes, this is ugly, and would be solved by not handling
                 * grow or append in range lock code. If that was done then
                 * we could make the range locking code generically available
                 * to other non-zfs consumers.
                 */
                if (zp->z_vnode) { /* caller is ZPL */
                        /*
                         * If in append mode pick up the current end of file.
                         * This is done under z_range_lock to avoid races.
                         */
                        if (new->r_type == RL_APPEND)
                                new->r_off = zp->z_phys->zp_size;

                        /*
                         * If we need to grow the block size then grab the
                         * whole file range. This is also done under
                         * z_range_lock to avoid races.
                         */
                        end_size = MAX(zp->z_phys->zp_size, new->r_off + len);
                        if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
                            zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
                                new->r_off = 0;
                                new->r_len = UINT64_MAX;
                        }
                }

                /*
                 * First check for the usual case of no locks.
                 */
                if (avl_numnodes(tree) == 0) {
                        new->r_type = RL_WRITER; /* convert to writer */
                        avl_add(tree, new);
                        return;
                }

                /*
                 * Look for any locks in the range.
                 */
                rl = avl_find(tree, new, &where);
                if (rl)
                        goto wait; /* already locked at same offset */

                rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
                KASSERT(rl == NULL || 0 < rl->r_refcnt);
                if (rl && (rl->r_off < new->r_off + new->r_len))
                        goto wait;

                rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
                KASSERT(rl == NULL || 0 < rl->r_refcnt);
                if (rl && rl->r_off + rl->r_len > new->r_off)
                        goto wait;

                new->r_type = RL_WRITER; /* convert possible RL_APPEND */
                avl_insert(tree, new, where);
                return;
wait:
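                /*
                 * A conflicting lock was found.  Take a hold on it so that
                 * it cannot be freed while we sleep, wait for it to be
                 * unlocked, then retry the search with the original range.
                 */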
                if (!rl->r_write_wanted) {
                        rl->r_write_wanted = B_TRUE;
                }
                if (zfs_range_lock_hold(rl) != 0)
                        panic("too many waiters on zfs range lock %p", rl);
                cv_wait(&rl->r_wr_cv, &zp->z_range_lock);
                zfs_range_lock_rele(rl);

                /* reset to original */
                new->r_off = off;
                new->r_len = len;
        }
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
        rl_t *proxy;

        if (rl->r_proxy)
                return (rl); /* already a proxy */

        ASSERT3U(rl->r_cnt, ==, 1);
        ASSERT(rl->r_write_wanted == B_FALSE);
        ASSERT(rl->r_read_wanted == B_FALSE);
        avl_remove(tree, rl);
        rl->r_cnt = 0;

        /* create a proxy range lock */
        proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        proxy->r_zp = rl->r_zp;
        proxy->r_off = rl->r_off;
        proxy->r_len = rl->r_len;
        proxy->r_cnt = 1;
        proxy->r_type = RL_READER;
        proxy->r_proxy = B_TRUE;
        cv_init(&proxy->r_wr_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&proxy->r_rd_cv, NULL, CV_DEFAULT, NULL);
        proxy->r_write_wanted = B_FALSE;
        proxy->r_read_wanted = B_FALSE;
        proxy->r_refcnt = 1;
        avl_add(tree, proxy);

        return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
        rl_t *front, *rear;

        ASSERT3U(rl->r_len, >, 1);
        ASSERT3U(off, >, rl->r_off);
        ASSERT3U(off, <, rl->r_off + rl->r_len);
        ASSERT(rl->r_write_wanted == B_FALSE);
        ASSERT(rl->r_read_wanted == B_FALSE);

        /* create the rear proxy range lock */
        rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        rear->r_zp = rl->r_zp;
        rear->r_off = off;
        rear->r_len = rl->r_off + rl->r_len - off;
        rear->r_cnt = rl->r_cnt;
        rear->r_type = RL_READER;
        rear->r_proxy = B_TRUE;
        cv_init(&rear->r_wr_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&rear->r_rd_cv, NULL, CV_DEFAULT, NULL);
        rear->r_refcnt = 1;
        rear->r_write_wanted = B_FALSE;
        rear->r_read_wanted = B_FALSE;

        front = zfs_range_proxify(tree, rl);
        front->r_len = off - rl->r_off;

        avl_insert_here(tree, rear, front, AVL_AFTER);
        return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len, znode_t *zp)
{
        rl_t *rl;

        ASSERT(len);
        rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        rl->r_zp = zp;
        rl->r_off = off;
        rl->r_len = len;
        rl->r_cnt = 1;
        rl->r_type = RL_READER;
        rl->r_proxy = B_TRUE;
        cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&rl->r_rd_cv, NULL, CV_DEFAULT, NULL);
        rl->r_write_wanted = B_FALSE;
        rl->r_read_wanted = B_FALSE;
        rl->r_refcnt = 1;
        avl_add(tree, rl);
}

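/*
 * Add a reader lock for the supplied range, converting overlapping
 * locks to proxies, splitting them where needed, and bumping their
 * reference counts.
 */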
static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
        znode_t *zp = new->r_zp;
        rl_t *next;
        uint64_t off = new->r_off;
        uint64_t len = new->r_len;

        /*
         * prev arrives either:
         * - pointing to an entry at the same offset
         * - pointing to the entry with the closest previous offset whose
         *   range may overlap with the new range
         * - null, if there were no ranges starting before the new one
         */
        if (prev) {
                if (prev->r_off + prev->r_len <= off) {
                        prev = NULL;
                } else if (prev->r_off != off) {
                        /*
                         * convert to proxy if needed then
                         * split this entry and bump ref count
                         */
                        prev = zfs_range_split(tree, prev, off);
                        prev = AVL_NEXT(tree, prev); /* move to rear range */
                }
        }
        ASSERT((prev == NULL) || (prev->r_off == off));

        if (prev)
                next = prev;
        else
                next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

        if (next == NULL || off + len <= next->r_off) {
                /* no overlaps, use the original new rl_t in the tree */
                avl_insert(tree, new, where);
                return;
        }

        KASSERT(0 < next->r_refcnt);
        if (off < next->r_off) {
                /* Add a proxy for initial range before the overlap */
                zfs_range_new_proxy(tree, off, next->r_off - off, zp);
        }

        new->r_cnt = 0; /* will use proxies in tree */
        /*
         * We now search forward through the ranges, until we go past the end
         * of the new range. For each entry we make it a proxy if it
         * isn't already, then bump its reference count. If there are any
         * gaps between the ranges then we create a new proxy range.
         */
        for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
                if (off + len <= next->r_off)
                        break;
                if (prev && prev->r_off + prev->r_len < next->r_off) {
                        /* there's a gap */
                        ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
                        zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
                            next->r_off - (prev->r_off + prev->r_len), zp);
                }
                if (off + len == next->r_off + next->r_len) {
                        /* exact overlap with end */
                        next = zfs_range_proxify(tree, next);
                        KASSERT(0 < next->r_refcnt);
                        next->r_cnt++;
                        return;
                }
                if (off + len < next->r_off + next->r_len) {
                        /* new range ends in the middle of this block */
                        next = zfs_range_split(tree, next, off + len);
                        KASSERT(0 < next->r_refcnt);
                        next->r_cnt++;
                        return;
                }
                ASSERT3U(off + len, >, next->r_off + next->r_len);
                next = zfs_range_proxify(tree, next);
                KASSERT(0 < next->r_refcnt);
                next->r_cnt++;
        }

        /* Add the remaining end range. */
        zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
            (off + len) - (prev->r_off + prev->r_len), zp);
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
        avl_tree_t *tree = &zp->z_range_avl;
        rl_t *prev, *next;
        avl_index_t where;
        uint64_t off = new->r_off;
        uint64_t len = new->r_len;

        /*
         * Look for any writer locks in the range.
         */
retry:
        prev = avl_find(tree, new, &where);
        if (prev == NULL)
                prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

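        /*
         * Note: a range with a waiting writer (r_write_wanted set) blocks
         * new readers as well, so that a steady stream of readers cannot
         * starve a waiting writer.
         */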
        /*
         * Check the previous range for a writer lock overlap.
         */
        if (prev && (off < prev->r_off + prev->r_len)) {
                if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
                        if (!prev->r_read_wanted) {
                                prev->r_read_wanted = B_TRUE;
                        }
                        if (zfs_range_lock_hold(prev) != 0)
                                panic("too many waiters on zfs range lock %p",
                                    prev);
                        cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
                        zfs_range_lock_rele(prev);
                        goto retry;
                }
                if (off + len < prev->r_off + prev->r_len)
                        goto got_lock;
        }

        /*
         * Search through the following ranges to see if there's
         * any write lock overlap.
         */
        if (prev)
                next = AVL_NEXT(tree, prev);
        else
                next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
        for (; next; next = AVL_NEXT(tree, next)) {
                if (off + len <= next->r_off)
                        goto got_lock;
                if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
                        if (!next->r_read_wanted) {
                                next->r_read_wanted = B_TRUE;
                        }
                        if (zfs_range_lock_hold(next) != 0)
                                panic("too many waiters on zfs range lock %p",
                                    next);
                        cv_wait(&next->r_rd_cv, &zp->z_range_lock);
                        zfs_range_lock_rele(next);
                        goto retry;
                }
                if (off + len <= next->r_off + next->r_len)
                        goto got_lock;
        }

got_lock:
        /*
         * Add the read lock, which may involve splitting existing
         * locks and bumping ref counts (r_cnt).
         */
        zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or range reduction (if the entire file
 * was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
        rl_t *new;

        ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

        new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
        new->r_zp = zp;
        new->r_off = off;
        if (len + off < off)    /* overflow */
                len = UINT64_MAX - off;
        new->r_len = len;
        new->r_cnt = 1; /* assume it's going to be in the tree */
        new->r_type = type;
        new->r_proxy = B_FALSE;
        cv_init(&new->r_wr_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&new->r_rd_cv, NULL, CV_DEFAULT, NULL);
        new->r_write_wanted = B_FALSE;
        new->r_read_wanted = B_FALSE;
        new->r_refcnt = 1;

        mutex_enter(&zp->z_range_lock);
        if (type == RL_READER) {
                /*
                 * First check for the usual case of no locks.
                 */
                if (avl_numnodes(&zp->z_range_avl) == 0)
                        avl_add(&zp->z_range_avl, new);
                else
                        zfs_range_lock_reader(zp, new);
        } else {
                zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
        }
        mutex_exit(&zp->z_range_lock);
        return (new);
}

/*
 * Unlock a reader lock.
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
        avl_tree_t *tree = &zp->z_range_avl;
        rl_t *rl, *next;
        uint64_t len;

        /*
         * The common case is when the remove entry is in the tree
         * (cnt == 1), meaning there have been no other reader locks
         * overlapping with this one. Otherwise the remove entry will have
         * been removed from the tree and replaced by proxies (one or
         * more ranges mapping to the entire range).
         */
        if (remove->r_cnt == 1) {
                avl_remove(tree, remove);
                if (remove->r_write_wanted) {
                        cv_broadcast(&remove->r_wr_cv);
                }
                if (remove->r_read_wanted) {
                        cv_broadcast(&remove->r_rd_cv);
                }
        } else {
                ASSERT3U(remove->r_cnt, ==, 0);
                ASSERT3U(remove->r_write_wanted, ==, 0);
                ASSERT3U(remove->r_read_wanted, ==, 0);
                /*
                 * Find start proxy representing this reader lock,
                 * then decrement ref count on all proxies
                 * that make up this range, freeing them as needed.
                 */
                rl = avl_find(tree, remove, NULL);
                ASSERT(rl);
                ASSERT(rl->r_cnt);
                ASSERT(rl->r_type == RL_READER);
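                /*
                 * The proxies exactly tile the original range, so
                 * subtracting each proxy's length from the remaining
                 * length terminates exactly at the end of the range.
                 */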
                for (len = remove->r_len; len != 0; rl = next) {
                        len -= rl->r_len;
                        if (len) {
                                next = AVL_NEXT(tree, rl);
                                ASSERT(next);
                                ASSERT(rl->r_off + rl->r_len == next->r_off);
                                ASSERT(next->r_cnt);
                                ASSERT(next->r_type == RL_READER);
                        }
                        rl->r_cnt--;
                        if (rl->r_cnt == 0) {
                                avl_remove(tree, rl);
                                if (rl->r_write_wanted) {
                                        cv_broadcast(&rl->r_wr_cv);
                                }
                                if (rl->r_read_wanted) {
                                        cv_broadcast(&rl->r_rd_cv);
                                }
                                zfs_range_lock_rele(rl);
                        }
                }
        }
        zfs_range_lock_rele(remove);
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
        znode_t *zp = rl->r_zp;

        ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
        ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
        ASSERT(!rl->r_proxy);

        mutex_enter(&zp->z_range_lock);
        if (rl->r_type == RL_WRITER) {
                /* writer locks can't be shared or split */
                avl_remove(&zp->z_range_avl, rl);
                if (rl->r_write_wanted) {
                        cv_broadcast(&rl->r_wr_cv);
                }
                if (rl->r_read_wanted) {
                        cv_broadcast(&rl->r_rd_cv);
                }
                zfs_range_lock_rele(rl);
                mutex_exit(&zp->z_range_lock);
        } else {
                /*
                 * lock may be shared, let zfs_range_unlock_reader()
                 * release the lock and free the rl_t
                 */
                zfs_range_unlock_reader(zp, rl);
                mutex_exit(&zp->z_range_lock);
        }
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
        znode_t *zp = rl->r_zp;

        /* Ensure there are no other locks */
        ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
        ASSERT(rl->r_off == 0);
        ASSERT(rl->r_type == RL_WRITER);
        ASSERT(!rl->r_proxy);
        ASSERT3U(rl->r_len, ==, UINT64_MAX);
        ASSERT3U(rl->r_cnt, ==, 1);

        mutex_enter(&zp->z_range_lock);
        rl->r_off = off;
        rl->r_len = len;
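        /* Wake any waiters; they may no longer overlap the reduced range. */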
        if (rl->r_write_wanted)
                cv_broadcast(&rl->r_wr_cv);
        if (rl->r_read_wanted)
                cv_broadcast(&rl->r_rd_cv);
        mutex_exit(&zp->z_range_lock);
}

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
        const rl_t *rl1 = arg1;
        const rl_t *rl2 = arg2;

        if (rl1->r_off > rl2->r_off)
                return (1);
        if (rl1->r_off < rl2->r_off)
                return (-1);
        return (0);
}