1 /*
2 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
3 * All Rights Reserved. See COPYRIGHT for more information.
4 *
5 * Because fcntl locks are
6 * process-oriented, we need to keep around a list of file descriptors
7 * that refer to the same file.
8 *
9 * TODO: fix the race when reading/writing.
10 * keep a pool of both locks and reference counters around so that
11 * we can save on mallocs. we should also use a tree to keep things
12 * sorted.
13 */
14
15 #ifdef HAVE_CONFIG_H
16 #include "config.h"
17 #endif /* HAVE_CONFIG_H */
18
19 #include <atalk/adouble.h>
20 #include <atalk/logger.h>
21 #include <atalk/compat.h>
22 #include <atalk/errchk.h>
23 #include <atalk/util.h>
24
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <inttypes.h>
29
30 #include <string.h>
31
32 #include "ad_lock.h"
33
shmdstrfromoff(off_t off)34 static const char *shmdstrfromoff(off_t off)
35 {
36 switch (off) {
37 case AD_FILELOCK_OPEN_WR:
38 return "OPEN_WR_DATA";
39 case AD_FILELOCK_OPEN_RD:
40 return "OPEN_RD_DATA";
41 case AD_FILELOCK_RSRC_OPEN_WR:
42 return "OPEN_WR_RSRC";
43 case AD_FILELOCK_RSRC_OPEN_RD:
44 return "OPEN_RD_RSRC";
45 case AD_FILELOCK_DENY_WR:
46 return "DENY_WR_DATA";
47 case AD_FILELOCK_DENY_RD:
48 return "DENY_RD_DATA";
49 case AD_FILELOCK_RSRC_DENY_WR:
50 return "DENY_WR_RSRC";
51 case AD_FILELOCK_RSRC_DENY_RD:
52 return "DENY_RD_RSRC";
53 case AD_FILELOCK_OPEN_NONE:
54 return "OPEN_NONE_DATA";
55 case AD_FILELOCK_RSRC_OPEN_NONE:
56 return "OPEN_NONE_RSRC";
57 default:
58 return "-";
59 }
60 }
61
62 /* ----------------------- */
/* Apply or test an fcntl() byte-range lock, with debug logging.
 *
 * @param fd    open file descriptor, or AD_SYMLINK (pseudo fd, no real file)
 * @param cmd   F_SETLK to set/clear, F_GETLK to test
 * @param lock  lock description; updated in place by F_GETLK
 * @returns 0 on success, -1 on error (via the EC_* error-check macros)
 */
static int set_lock(int fd, int cmd, struct flock *lock)
{
    EC_INIT;

    LOG(log_debug, logtype_ad, "set_lock(fd: %d, %s, %s, off: %jd (%s), len: %jd): BEGIN",
        fd, cmd == F_SETLK ? "F_SETLK" : "F_GETLK",
        lock->l_type == F_RDLCK ? "F_RDLCK" : lock->l_type == F_WRLCK ? "F_WRLCK" : "F_UNLCK",
        (intmax_t)lock->l_start,
        shmdstrfromoff(lock->l_start),
        (intmax_t)lock->l_len);

    if (fd == AD_SYMLINK) {
        /* Symlinks carry no lockable fd: report "not locked" on a test
         * and pretend success otherwise. */
        if (cmd == F_GETLK)
            lock->l_type = F_UNLCK;
        return 0;
    }

    EC_NEG1( fcntl(fd, cmd, lock) );

EC_CLEANUP:
    EC_EXIT;
}
85
86 /* ----------------------- */
XLATE_FCNTL_LOCK(int type)87 static int XLATE_FCNTL_LOCK(int type)
88 {
89 switch(type) {
90 case ADLOCK_RD:
91 return F_RDLCK;
92 case ADLOCK_WR:
93 return F_WRLCK;
94 case ADLOCK_CLR:
95 return F_UNLCK;
96 }
97 return -1;
98 }
99
100 /* ----------------------- */
/* Do the byte ranges [a, a+alen) and [b, b+blen) overlap?
 * A length of 0 means "from here to EOF", matching fcntl() l_len
 * semantics: such a range overlaps everything at or after its start. */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
    if (alen == 0 && a <= b)
        return 1;
    if (blen == 0 && b <= a)
        return 1;
    return (a + alen > b) && (b + blen > a);
}
107
108 /* allocation for lock regions. we allocate aggressively and shrink
109 * only in large chunks. */
110 #define ARRAY_BLOCK_SIZE 10
111 #define ARRAY_FREE_DELTA 100
112
113 /* remove a lock and compact space if necessary */
/* Drop lock slot i from ad's lock array.
 *
 * The refcount is shared between (read) locks covering the same range;
 * the kernel fcntl lock is only released when the last reference goes
 * away.  The array is kept dense by copying the last element into the
 * freed slot, and is shrunk in ARRAY_FREE_DELTA-sized steps to bound
 * realloc traffic.
 */
static void adf_freelock(struct ad_fd *ad, const int i)
{
    adf_lock_t *lock = ad->adf_lock + i;

    if (--(*lock->refcount) < 1) {
        /* last reference: release the kernel lock and the counter */
        free(lock->refcount);
        lock->lock.l_type = F_UNLCK;
        set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
    }

    ad->adf_lockcount--;

    /* move another lock into the empty space */
    /* lock + adf_lockcount - i == &adf_lock[adf_lockcount], i.e. the old
     * last element (adf_lockcount was decremented above) */
    if (i < ad->adf_lockcount) {
        memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
    }

    /* free extra cruft if we go past a boundary. we always want to
     * keep at least some stuff around for allocations. this wastes
     * a bit of space to save time on reallocations. */
    if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
        (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
        struct adf_lock_t *tmp;

        /* shrink only; if realloc fails we simply keep the old, larger
         * block, which is still valid */
        tmp = (struct adf_lock_t *)
              realloc(ad->adf_lock, sizeof(adf_lock_t)*
                      (ad->adf_lockcount + ARRAY_FREE_DELTA));
        if (tmp) {
            ad->adf_lock = tmp;
            ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
        }
    }
}
147
148
149 /* this needs to deal with the following cases:
150 * 1) free all UNIX byterange lock from any fork
151 * 2) free all locks of the requested fork
152 *
153 * i converted to using arrays of locks. everytime a lock
154 * gets removed, we shift all of the locks down.
155 */
adf_unlock(struct adouble * ad,struct ad_fd * adf,const int fork,int unlckbrl)156 static void adf_unlock(struct adouble *ad, struct ad_fd *adf, const int fork, int unlckbrl)
157 {
158 adf_lock_t *lock = adf->adf_lock;
159 int i;
160
161 for (i = 0; i < adf->adf_lockcount; i++) {
162 if ((unlckbrl && lock[i].lock.l_start < AD_FILELOCK_BASE)
163 || lock[i].user == fork) {
164 /* we're really going to delete this lock. note: read locks
165 are the only ones that allow refcounts > 1 */
166 adf_freelock(adf, i);
167 /* we shifted things down, so we need to backtrack */
168 i--;
169 /* unlikely but realloc may have change adf_lock */
170 lock = adf->adf_lock;
171 }
172 }
173 }
174
175 /* relock any byte lock that overlaps off/len. unlock everything
176 * else. */
adf_relockrange(struct ad_fd * ad,int fd,off_t off,off_t len)177 static void adf_relockrange(struct ad_fd *ad, int fd, off_t off, off_t len)
178 {
179 adf_lock_t *lock = ad->adf_lock;
180 int i;
181
182 for (i = 0; i < ad->adf_lockcount; i++) {
183 if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
184 set_lock(fd, F_SETLK, &lock[i].lock);
185 }
186 }
187
188
189 /* find a byte lock that overlaps off/len for a particular open fork */
adf_findlock(struct ad_fd * ad,const int fork,const int type,const off_t off,const off_t len)190 static int adf_findlock(struct ad_fd *ad,
191 const int fork, const int type,
192 const off_t off,
193 const off_t len)
194 {
195 adf_lock_t *lock = ad->adf_lock;
196 int i;
197
198 for (i = 0; i < ad->adf_lockcount; i++) {
199 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
200 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
201 (lock[i].user == fork) &&
202 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
203 return i;
204 }
205 }
206 return -1;
207 }
208
209
210 /* search other fork lock lists */
adf_findxlock(struct ad_fd * ad,const int fork,const int type,const off_t off,const off_t len)211 static int adf_findxlock(struct ad_fd *ad,
212 const int fork, const int type,
213 const off_t off,
214 const off_t len)
215 {
216 adf_lock_t *lock = ad->adf_lock;
217 int i;
218
219 for (i = 0; i < ad->adf_lockcount; i++) {
220 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK))
221 ||
222 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK)))
223 &&
224 (lock[i].user != fork)
225 &&
226 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
227 return i;
228 }
229 return -1;
230 }
231
232 /* okay, this needs to do the following:
233 * 1) check current list of locks. error on conflict.
234 * 2) apply the lock. error on conflict with another process.
235 * 3) update the list of locks this file has.
236 *
237 * NOTE: this treats synchronization locks a little differently. we
238 * do the following things for those:
239 * 1) if the header file exists, all the locks go in the beginning
240 * of that.
241 * 2) if the header file doesn't exist, we stick the locks
242 * in the locations specified by AD_FILELOCK_RD/WR.
243 */
244
245 /* --------------
246 translate a resource fork lock to an offset
247 */
rf2off(off_t off)248 static off_t rf2off(off_t off)
249 {
250 off_t start = off;
251 if (off == AD_FILELOCK_OPEN_WR)
252 start = AD_FILELOCK_RSRC_OPEN_WR;
253 else if (off == AD_FILELOCK_OPEN_RD)
254 start = AD_FILELOCK_RSRC_OPEN_RD;
255 else if (off == AD_FILELOCK_DENY_RD)
256 start = AD_FILELOCK_RSRC_DENY_RD;
257 else if (off == AD_FILELOCK_DENY_WR)
258 start = AD_FILELOCK_RSRC_DENY_WR;
259 else if (off == AD_FILELOCK_OPEN_NONE)
260 start = AD_FILELOCK_RSRC_OPEN_NONE;
261 return start;
262 }
263
264 /*!
265 * Test a lock
266 *
267 * (1) Test against our own locks array
268 * (2) Test fcntl lock, locks from other processes
269 *
270 * @param adf (r) handle
271 * @param off (r) offset
272 * @param len (r) lenght
273 *
274 * @returns 1 if there's an existing lock, 0 if there's no lock,
275 * -1 in case any error occured
276 */
testlock(const struct ad_fd * adf,off_t off,off_t len)277 static int testlock(const struct ad_fd *adf, off_t off, off_t len)
278 {
279 struct flock lock;
280 adf_lock_t *plock;
281 int i;
282
283 lock.l_start = off;
284
285 plock = adf->adf_lock;
286 lock.l_whence = SEEK_SET;
287 lock.l_len = len;
288
289 /* (1) Do we have a lock ? */
290 for (i = 0; i < adf->adf_lockcount; i++) {
291 if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
292 return 1;
293 }
294
295 /* (2) Does another process have a lock? */
296 lock.l_type = (adf->adf_flags & O_RDWR) ? F_WRLCK : F_RDLCK;
297
298 if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
299 /* is that kind of error possible ?*/
300 return (errno == EACCES || errno == EAGAIN) ? 1 : -1;
301 }
302
303 if (lock.l_type == F_UNLCK) {
304 return 0;
305 }
306 return 1;
307 }
308
309 #define LTYPE2STRBUFSIZ 128
locktypetostr(int type)310 static const char *locktypetostr(int type)
311 {
312 int first = 1;
313 static char buf[LTYPE2STRBUFSIZ];
314
315 buf[0] = 0;
316
317 if (type == 0) {
318 strlcat(buf, "CLR", LTYPE2STRBUFSIZ);
319 first = 0;
320 return buf;
321 }
322 if (type & ADLOCK_RD) {
323 if (!first)
324 strlcat(buf, "|", LTYPE2STRBUFSIZ);
325 strlcat(buf, "RD", LTYPE2STRBUFSIZ);
326 first = 0;
327 }
328 if (type & ADLOCK_WR) {
329 if (!first)
330 strlcat(buf, "|", LTYPE2STRBUFSIZ);
331 strlcat(buf, "WR", LTYPE2STRBUFSIZ);
332 first = 0;
333 }
334 if (type & ADLOCK_UPGRADE) {
335 if (!first)
336 strlcat(buf, "|", LTYPE2STRBUFSIZ);
337 strlcat(buf, "UPG", LTYPE2STRBUFSIZ);
338 first = 0;
339 }
340 if (type & ADLOCK_FILELOCK) {
341 if (!first)
342 strlcat(buf, "|", LTYPE2STRBUFSIZ);
343 strlcat(buf, "FILELOCK", LTYPE2STRBUFSIZ);
344 first = 0;
345 }
346
347 return buf;
348 }
349
350 /******************************************************************************
351 * Public functions
352 ******************************************************************************/
353
ad_lock(struct adouble * ad,uint32_t eid,int locktype,off_t off,off_t len,int fork)354 int ad_lock(struct adouble *ad, uint32_t eid, int locktype, off_t off, off_t len, int fork)
355 {
356 struct flock lock;
357 struct ad_fd *adf;
358 adf_lock_t *adflock;
359 int oldlock;
360 int i;
361 int type;
362 int ret = 0, fcntl_lock_err = 0;
363
364 LOG(log_debug, logtype_ad, "ad_lock(%s, %s, off: %jd (%s), len: %jd): BEGIN",
365 eid == ADEID_DFORK ? "data" : "reso",
366 locktypetostr(locktype),
367 (intmax_t)off,
368 shmdstrfromoff(off),
369 (intmax_t)len);
370
371 if ((locktype & ADLOCK_FILELOCK) && (len != 1))
372 AFP_PANIC("lock API error");
373
374 type = locktype;
375
376 if (eid == ADEID_DFORK) {
377 adf = &ad->ad_data_fork;
378 lock.l_start = off;
379 } else { /* rfork */
380 if (type & ADLOCK_FILELOCK) {
381 adf = &ad->ad_data_fork;
382 lock.l_start = rf2off(off);
383 } else {
384 adf = ad->ad_rfp;
385 lock.l_start = off + ad_getentryoff(ad, ADEID_RFORK);
386 }
387 }
388
389 /* NOTE: we can't write lock a read-only file. on those, we just
390 * make sure that we have a read lock set. that way, we at least prevent
391 * someone else from really setting a deny read/write on the file.
392 */
393 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
394 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
395 }
396
397 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
398 lock.l_whence = SEEK_SET;
399 lock.l_len = len;
400
401 /* byte_lock(len=-1) lock whole file */
402 if (len == BYTELOCK_MAX) {
403 lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
404 }
405
406 /* see if it's locked by another fork.
407 * NOTE: this guarantees that any existing locks must be at most
408 * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
409 * guaranteed to be ORable. */
410 if (adf_findxlock(adf, fork, ADLOCK_WR |
411 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
412 lock.l_start, lock.l_len) > -1) {
413 errno = EACCES;
414 ret = -1;
415 goto exit;
416 }
417
418 /* look for any existing lock that we may have */
419 i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
420 adflock = (i < 0) ? NULL : adf->adf_lock + i;
421
422 /* here's what we check for:
423 1) we're trying to re-lock a lock, but we didn't specify an update.
424 2) we're trying to free only part of a lock.
425 3) we're trying to free a non-existent lock. */
426 if ( (!adflock && (lock.l_type == F_UNLCK))
427 ||
428 (adflock
429 && !(type & ADLOCK_UPGRADE)
430 && ((lock.l_type != F_UNLCK)
431 || (adflock->lock.l_start != lock.l_start)
432 || (adflock->lock.l_len != lock.l_len) ))
433 ) {
434 errno = EINVAL;
435 ret = -1;
436 goto exit;
437 }
438
439
440 /* now, update our list of locks */
441 /* clear the lock */
442 if (lock.l_type == F_UNLCK) {
443 adf_freelock(adf, i);
444 goto exit;
445 }
446
447 /* attempt to lock the file. */
448 if (set_lock(adf->adf_fd, F_SETLK, &lock) < 0) {
449 ret = -1;
450 goto exit;
451 }
452
453 /* we upgraded this lock. */
454 if (adflock && (type & ADLOCK_UPGRADE)) {
455 memcpy(&adflock->lock, &lock, sizeof(lock));
456 goto exit;
457 }
458
459 /* it wasn't an upgrade */
460 oldlock = -1;
461 if (lock.l_type == F_RDLCK) {
462 oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
463 }
464
465 /* no more space. this will also happen if lockmax == lockcount == 0 */
466 if (adf->adf_lockmax == adf->adf_lockcount) {
467 adf_lock_t *tmp = (adf_lock_t *)
468 realloc(adf->adf_lock, sizeof(adf_lock_t)*
469 (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
470 if (!tmp) {
471 ret = fcntl_lock_err = -1;
472 goto exit;
473 }
474 adf->adf_lock = tmp;
475 adf->adf_lockmax += ARRAY_BLOCK_SIZE;
476 }
477 adflock = adf->adf_lock + adf->adf_lockcount;
478
479 /* fill in fields */
480 memcpy(&adflock->lock, &lock, sizeof(lock));
481 adflock->user = fork;
482 if (oldlock > -1) {
483 adflock->refcount = (adf->adf_lock + oldlock)->refcount;
484 } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
485 ret = fcntl_lock_err = 1;
486 goto exit;
487 }
488
489 (*adflock->refcount)++;
490 adf->adf_lockcount++;
491
492 exit:
493 if (ret != 0) {
494 if (fcntl_lock_err != 0) {
495 lock.l_type = F_UNLCK;
496 set_lock(adf->adf_fd, F_SETLK, &lock);
497 }
498 }
499 LOG(log_debug, logtype_ad, "ad_lock: END: %d", ret);
500 return ret;
501 }
502
ad_tmplock(struct adouble * ad,uint32_t eid,int locktype,off_t off,off_t len,int fork)503 int ad_tmplock(struct adouble *ad, uint32_t eid, int locktype, off_t off, off_t len, int fork)
504 {
505 struct flock lock;
506 struct ad_fd *adf;
507 int err;
508 int type;
509
510 LOG(log_debug, logtype_ad, "ad_tmplock(%s, %s, off: %jd (%s), len: %jd): BEGIN",
511 eid == ADEID_DFORK ? "data" : "reso",
512 locktypetostr(locktype),
513 (intmax_t)off,
514 shmdstrfromoff(off),
515 (intmax_t)len);
516
517 lock.l_start = off;
518 type = locktype;
519
520 if (eid == ADEID_DFORK) {
521 adf = &ad->ad_data_fork;
522 } else {
523 adf = &ad->ad_resource_fork;
524 if (adf->adf_fd == -1) {
525 /* there's no resource fork. return success */
526 err = 0;
527 goto exit;
528 }
529 /* if ADLOCK_FILELOCK we want a lock from offset 0
530 * it's used when deleting a file:
531 * in open we put read locks on meta datas
532 * in delete a write locks on the whole file
533 * so if the file is open by somebody else it fails
534 */
535 if (!(type & ADLOCK_FILELOCK))
536 lock.l_start += ad_getentryoff(ad, eid);
537 }
538
539 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
540 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
541 }
542
543 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
544 lock.l_whence = SEEK_SET;
545 lock.l_len = len;
546
547 /* see if it's locked by another fork. */
548 if (fork && adf_findxlock(adf, fork,
549 ADLOCK_WR | ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
550 lock.l_start, lock.l_len) > -1) {
551 errno = EACCES;
552 err = -1;
553 goto exit;
554 }
555
556 /* okay, we might have ranges byte-locked. we need to make sure that
557 * we restore the appropriate ranges once we're done. so, we check
558 * for overlap on an unlock and relock.
559 * XXX: in the future, all the byte locks will be sorted and contiguous.
560 * we just want to upgrade all the locks and then downgrade them
561 * here. */
562 err = set_lock(adf->adf_fd, F_SETLK, &lock);
563 if (!err && (lock.l_type == F_UNLCK))
564 adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
565
566 exit:
567 LOG(log_debug, logtype_ad, "ad_tmplock: END: %d", err);
568 return err;
569 }
570
571 /* --------------------- */
ad_unlock(struct adouble * ad,const int fork,int unlckbrl)572 void ad_unlock(struct adouble *ad, const int fork, int unlckbrl)
573 {
574 LOG(log_debug, logtype_ad, "ad_unlock(unlckbrl: %d): BEGIN", unlckbrl);
575
576 if (ad_data_fileno(ad) != -1) {
577 adf_unlock(ad, &ad->ad_data_fork, fork, unlckbrl);
578 }
579 if (ad_reso_fileno(ad) != -1) {
580 adf_unlock(ad, &ad->ad_resource_fork, fork, unlckbrl);
581 }
582
583 LOG(log_debug, logtype_ad, "ad_unlock: END");
584 }
585
586 /*!
587 * Test for a share mode lock
588 *
589 * @param ad (rw) handle
590 * @param eid (r) datafork or ressource fork
591 * @param off (r) sharemode lock to test
592 *
593 * @returns 1 if there's an existing lock, 0 if there's no lock,
594 * -1 in case any error occured
595 */
ad_testlock(struct adouble * ad,int eid,const off_t off)596 int ad_testlock(struct adouble *ad, int eid, const off_t off)
597 {
598 int ret = 0;
599 off_t lock_offset;
600
601 LOG(log_debug, logtype_ad, "ad_testlock(%s, off: %jd (%s): BEGIN",
602 eid == ADEID_DFORK ? "data" : "reso",
603 (intmax_t)off,
604 shmdstrfromoff(off));
605
606 if (eid == ADEID_DFORK) {
607 lock_offset = off;
608 } else { /* rfork */
609 lock_offset = rf2off(off);
610 }
611
612 ret = testlock(&ad->ad_data_fork, lock_offset, 1);
613
614 LOG(log_debug, logtype_ad, "ad_testlock: END: %d", ret);
615 return ret;
616 }
617
618 /*!
619 * Return if a file is open by another process.
620 *
621 * Optimized for the common case:
622 * - there's no locks held by another process (clients)
623 * - or we already know the answer and don't need to test (attrbits)
624 *
625 * @param ad (rw) handle
626 * @param attrbits (r) forks opened by us
627 * @returns bitflags ATTRBIT_DOPEN | ATTRBIT_ROPEN if
628 * other process has fork of file opened
629 */
ad_openforks(struct adouble * ad,uint16_t attrbits)630 uint16_t ad_openforks(struct adouble *ad, uint16_t attrbits)
631 {
632 uint16_t ret = 0;
633 off_t off;
634 off_t len;
635
636 if (ad_data_fileno(ad) == -1)
637 return 0;
638
639 if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
640 /* Test all 4 locks at once */
641 off = AD_FILELOCK_OPEN_WR;
642 len = 4;
643 if (testlock(&ad->ad_data_fork, off, len) == 0)
644 return 0;
645 }
646
647 /* either there's a lock or we already know one fork is open */
648
649 if (!(attrbits & ATTRBIT_DOPEN)) {
650 off = AD_FILELOCK_OPEN_WR;
651 ret = testlock(&ad->ad_data_fork, off, 2) > 0 ? ATTRBIT_DOPEN : 0;
652 }
653
654 if (!(attrbits & ATTRBIT_ROPEN)) {
655 off = AD_FILELOCK_RSRC_OPEN_WR;
656 ret |= testlock(&ad->ad_data_fork, off, 2) > 0? ATTRBIT_ROPEN : 0;
657 }
658
659 return ret;
660 }
661