/*-
 * Copyright (c) 1996, 2020 Oracle and/or its affiliates.  All rights reserved.
 *
 * See the file LICENSE for license information.
 *
 * $Id$
 */

#include "db_config.h"

#include "db_int.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"

static int __lock_freelocker_int
    __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, int));

/*
 * __lock_id_pp --
 *	DB_ENV->lock_id pre/post processing.
 *
 * PUBLIC: int __lock_id_pp __P((DB_ENV *, u_int32_t *));
 */
int
__lock_id_pp(dbenv, idp)
	DB_ENV *dbenv;
	u_int32_t *idp;
{
	DB_THREAD_INFO *ip;
	ENV *env;
	int ret;

	env = dbenv->env;

	ENV_REQUIRES_CONFIG(env,
	    env->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK);

	ENV_ENTER(env, ip);
	REPLICATION_WRAP(env, (__lock_id(env, idp, NULL)), 0, ret);
	ENV_LEAVE(env, ip);
	return (ret);
}
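
/*
 * Example (an illustrative sketch, not part of the library): applications
 * normally reach this code through the public DB_ENV methods.  Assuming an
 * environment handle "dbenv" already opened with DB_INIT_LOCK:
 *
 *	u_int32_t locker;
 *	int ret;
 *
 *	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
 *		return (ret);
 *	... use the id with dbenv->lock_get() or dbenv->lock_vec() ...
 *	ret = dbenv->lock_id_free(dbenv, locker);
 */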

/*
 * __lock_id --
 *	Allocate a new lock id as well as a locker struct to hold it.  If we
 *	wrap around, find the minimum id currently in use and make sure we
 *	stay below it.  This is similar to __txn_begin_int's code to recover
 *	txn ids.
 *
 * PUBLIC: int __lock_id __P((ENV *, u_int32_t *, DB_LOCKER **));
 */
int
__lock_id(env, idp, lkp)
	ENV *env;
	u_int32_t *idp;
	DB_LOCKER **lkp;
{
	DB_LOCKER *lk;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	u_int32_t id, *ids;
	int nids, ret;

	lt = env->lk_handle;
	region = lt->reginfo.primary;
	id = DB_LOCK_INVALIDID;
	lk = NULL;
	ret = 0;

	LOCK_LOCKERS(env, region);

	/*
	 * Our current valid range can span the maximum valid value, so check
	 * for it and wrap manually.
	 */
	if (region->lock_id == DB_LOCK_MAXID &&
	    region->cur_maxid != DB_LOCK_MAXID)
		region->lock_id = DB_LOCK_INVALIDID;
	if (region->lock_id == region->cur_maxid) {
		if ((ret = __os_malloc(env,
		    sizeof(u_int32_t) * region->nlockers, &ids)) != 0)
			goto err;
		nids = 0;
		SH_TAILQ_FOREACH(lk, &region->lockers, ulinks, __db_locker)
			ids[nids++] = lk->id;
		region->lock_id = DB_LOCK_INVALIDID;
		region->cur_maxid = DB_LOCK_MAXID;
		if (nids != 0)
			__db_idspace(ids, nids,
			    &region->lock_id, &region->cur_maxid);
		__os_free(env, ids);
	}
	id = ++region->lock_id;

	/* Allocate a locker for this id. */
	ret = __lock_getlocker_int(lt, id, 1, NULL, &lk);

err:	UNLOCK_LOCKERS(env, region);

	if (idp != NULL)
		*idp = id;
	if (lkp != NULL)
		*lkp = lk;

	return (ret);
}
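
/*
 * A worked sketch of the wrap-around recovery above (the exact gap chosen
 * is up to __db_idspace, so treat the numbers as illustrative): if the
 * surviving lockers hold ids {3, 7, 2000000} when the id space is
 * exhausted, __db_idspace scans the in-use ids, picks the widest unused
 * gap, and resets the range so new ids are drawn from inside it, e.g.:
 *
 *	region->lock_id   = 7;		new ids start at 8
 *	region->cur_maxid = 1999999;	allocation stops before 2000000
 */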

/*
 * __lock_set_thread_id --
 *	Set the thread_id in an existing locker.
 *
 * PUBLIC: void __lock_set_thread_id __P((void *, pid_t, db_threadid_t));
 */
void
__lock_set_thread_id(lref_arg, pid, tid)
	void *lref_arg;
	pid_t pid;
	db_threadid_t tid;
{
	DB_LOCKER *lref;

	lref = lref_arg;
	lref->pid = pid;
	lref->tid = tid;
}

/*
 * __lock_id_free_pp --
 *	DB_ENV->lock_id_free pre/post processing.
 *
 * PUBLIC: int __lock_id_free_pp __P((DB_ENV *, u_int32_t));
 */
int
__lock_id_free_pp(dbenv, id)
	DB_ENV *dbenv;
	u_int32_t id;
{
	DB_LOCKER *sh_locker;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	DB_THREAD_INFO *ip;
	ENV *env;
	int handle_check, ret, t_ret;

	env = dbenv->env;

	ENV_REQUIRES_CONFIG(env,
	    env->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK);

	ENV_ENTER(env, ip);

	/* Check for replication block. */
	handle_check = IS_ENV_REPLICATED(env);
	if (handle_check && (ret = __env_rep_enter(env, 0)) != 0) {
		handle_check = 0;
		goto err;
	}

	lt = env->lk_handle;
	region = lt->reginfo.primary;

	LOCK_LOCKERS(env, region);
	if ((ret = __lock_getlocker_int(env->lk_handle,
	    id, 0, NULL, &sh_locker)) == 0) {
		if (sh_locker != NULL)
			ret = __lock_freelocker_int(lt, region, sh_locker, 1);
		else {
			ret = USR_ERR(env, EINVAL);
			__db_errx(env, DB_STR_A("2045",
			    "Unknown locker id: %lx", "%lx"), (u_long)id);
		}
	}
	UNLOCK_LOCKERS(env, region);

	if (handle_check && (t_ret = __env_db_rep_exit(env)) != 0 && ret == 0)
		ret = t_ret;

err:	ENV_LEAVE(env, ip);
	return (ret);
}

/*
 * __lock_id_free --
 *	Free a locker id.
 *
 * PUBLIC: int __lock_id_free __P((ENV *, DB_LOCKER *));
 */
int
__lock_id_free(env, sh_locker)
	ENV *env;
	DB_LOCKER *sh_locker;
{
	DB_LOCKER locker;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	DB_MSGBUF mb;
	int ret;

	lt = env->lk_handle;
	region = lt->reginfo.primary;
	ret = 0;

	if (sh_locker->nlocks != 0) {
		locker = *sh_locker;
		ret = USR_ERR(env, EINVAL);
		__db_errx(env, DB_STR_A("2046",
		    "Locker %lu still has %lu locks", "%lu %lu"),
		    (u_long)locker.id, (u_long)locker.nlocks);
		DB_MSGBUF_INIT(&mb);
		(void)__lock_dump_locker(env, &mb, lt, sh_locker);
		DB_MSGBUF_FLUSH(env, &mb);
		goto err;
	}

	LOCK_LOCKERS(env, region);
	ret = __lock_freelocker_int(lt, region, sh_locker, 1);
	UNLOCK_LOCKERS(env, region);

err:	return (ret);
}
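
/*
 * Internal callers pair __lock_id() with __lock_id_free() when they hold
 * the DB_LOCKER pointer directly; a minimal sketch (error handling elided,
 * and note __lock_id() accepts a NULL id pointer):
 *
 *	DB_LOCKER *lk;
 *	int ret;
 *
 *	if ((ret = __lock_id(env, NULL, &lk)) == 0)
 *		ret = __lock_id_free(env, lk);
 */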

/*
 * __lock_id_set --
 *	Set the current locker ID and current maximum unused ID (for
 *	testing purposes only).
 *
 * PUBLIC: int __lock_id_set __P((ENV *, u_int32_t, u_int32_t));
 */
int
__lock_id_set(env, cur_id, max_id)
	ENV *env;
	u_int32_t cur_id, max_id;
{
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;

	ENV_REQUIRES_CONFIG(env,
	    env->lk_handle, "lock_id_set", DB_INIT_LOCK);

	lt = env->lk_handle;
	region = lt->reginfo.primary;
	region->lock_id = cur_id;
	region->cur_maxid = max_id;

	return (0);
}
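
/*
 * A test can use this hook to force the wrap-around path in __lock_id()
 * without allocating billions of ids; a sketch of that use (assuming an
 * open, lock-configured environment):
 *
 *	(void)__lock_id_set(env, DB_LOCK_MAXID - 5, DB_LOCK_MAXID);
 *
 * After five more allocations region->lock_id reaches region->cur_maxid,
 * and the next __lock_id() call runs the id-space recovery code.
 */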

/*
 * __lock_getlocker, __lock_getlocker_int --
 *	Get a locker in the locker hash table.  The create parameter
 *	indicates whether the locker should be created if it doesn't exist
 *	in the table.  If there's a matching locker cached in the thread
 *	info, use that without locking.
 *
 *	The internal version does not check the thread info cache; it must
 *	be called with the lockers mutex locked.
 *
 * PUBLIC: int __lock_getlocker __P((DB_LOCKTAB *,
 * PUBLIC:     u_int32_t, int, DB_LOCKER **));
 * PUBLIC: int __lock_getlocker_int __P((DB_LOCKTAB *,
 * PUBLIC:     u_int32_t, int, DB_THREAD_INFO *, DB_LOCKER **));
 */
int
__lock_getlocker(lt, locker, create, retp)
	DB_LOCKTAB *lt;
	u_int32_t locker;
	int create;
	DB_LOCKER **retp;
{
	DB_LOCKREGION *region;
	DB_THREAD_INFO *ip;
	ENV *env;
	int ret;

	COMPQUIET(region, NULL);
	env = lt->env;
	region = lt->reginfo.primary;
	ENV_GET_THREAD_INFO(env, ip);

	/* Check to see if the locker is already in the thread info. */
	if (ip != NULL && ip->dbth_local_locker != INVALID_ROFF) {
		*retp = (DB_LOCKER *)
		    R_ADDR(&lt->reginfo, ip->dbth_local_locker);
		if ((*retp)->id == locker) {
			DB_ASSERT(env, !F_ISSET(*retp, DB_LOCKER_FREE));
#ifdef HAVE_STATISTICS
			region->stat.st_nlockers_hit++;
#endif
			return (0);
		}
	}
	LOCK_LOCKERS(env, region);
	ret = __lock_getlocker_int(lt, locker, create, ip, retp);
	UNLOCK_LOCKERS(env, region);
	return (ret);
}
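
/*
 * The fast path above means a thread repeatedly operating under its own
 * cached locker never takes the lockers mutex.  A minimal lookup sketch
 * (create == 0, so *retp may legitimately come back NULL):
 *
 *	DB_LOCKER *lockerp;
 *	int ret;
 *
 *	if ((ret = __lock_getlocker(env->lk_handle, id, 0, &lockerp)) == 0 &&
 *	    lockerp == NULL)
 *		... the id was not found in the hash table ...
 */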

int
__lock_getlocker_int(lt, locker, create, ip, retp)
	DB_LOCKTAB *lt;
	u_int32_t locker;
	int create;
	DB_THREAD_INFO *ip;
	DB_LOCKER **retp;
{
	DB_LOCKER *sh_locker;
	DB_LOCKREGION *region;
#ifdef DIAGNOSTIC
	DB_THREAD_INFO *diag;
#endif
	ENV *env;
	u_int32_t i, indx, nlockers;
	int ret;

	env = lt->env;
	region = lt->reginfo.primary;
	MUTEX_REQUIRED(env, region->mtx_lockers);

	LOCKER_HASH(lt, region, locker, indx);

	/*
	 * If we find the locker, we can just return it.  Otherwise, we
	 * need to create it.
	 */
	SH_TAILQ_FOREACH(sh_locker, &lt->locker_tab[indx], links, __db_locker)
		if (sh_locker->id == locker)
			break;

	if (sh_locker == NULL && create) {
		/* Can we reuse a locker struct cached in the thread info? */
		if (ip != NULL && ip->dbth_local_locker != INVALID_ROFF &&
		    (sh_locker = (DB_LOCKER *)R_ADDR(&lt->reginfo,
		    ip->dbth_local_locker))->id == DB_LOCK_INVALIDID) {
			DB_ASSERT(env, !F_ISSET(sh_locker, DB_LOCKER_FREE));
#ifdef HAVE_STATISTICS
			region->stat.st_nlockers_reused++;
#endif
		} else {
			/* Create new locker and insert it into hash table. */
			if ((sh_locker = SH_TAILQ_FIRST(
			    &region->free_lockers, __db_locker)) == NULL) {
				if (region->stat.st_maxlockers != 0 &&
				    region->stat.st_maxlockers <=
				    region->stat.st_lockers)
					return (__lock_nomem(env,
					    "locker entries"));
				/* Grow the locker table by about 25%. */
				nlockers = region->stat.st_lockers >> 2;
				/* Just in case. */
				if (nlockers == 0)
					nlockers = 1;
				if (region->stat.st_maxlockers != 0 &&
				    region->stat.st_maxlockers <
				    region->stat.st_lockers + nlockers)
					nlockers = region->stat.st_maxlockers -
					    region->stat.st_lockers;
				/*
				 * Don't hold the lockers mutex while getting
				 * space from the region; we could deadlock.
				 * There is no race when creating a locker
				 * because the id allocation is synchronized.
				 */
				UNLOCK_LOCKERS(env, region);
				LOCK_REGION_LOCK(env);
				/*
				 * If the max memory is not sized for max
				 * objects, allocate as much as possible.
				 */
				F_SET(&lt->reginfo, REGION_TRACKED);
				while (__env_alloc(&lt->reginfo, nlockers *
				    sizeof(struct __db_locker),
				    &sh_locker) != 0) {
					nlockers >>= 1;
					if (nlockers == 0)
						break;
				}
				F_CLR(&lt->reginfo, REGION_TRACKED);
				LOCK_REGION_UNLOCK(lt->env);
				LOCK_LOCKERS(env, region);
				for (i = 0; i < nlockers; i++) {
					SH_TAILQ_INSERT_HEAD(
					    &region->free_lockers,
					    sh_locker, links, __db_locker);
					sh_locker->mtx_locker = MUTEX_INVALID;
#ifdef DIAGNOSTIC
					sh_locker->prev_locker = INVALID_ROFF;
#endif
					sh_locker++;
				}
				if (nlockers == 0)
					return (__lock_nomem(env,
					    "locker entries"));
				region->stat.st_lockers += nlockers;
				sh_locker = SH_TAILQ_FIRST(
				    &region->free_lockers, __db_locker);
			}
			SH_TAILQ_REMOVE(
			    &region->free_lockers,
			    sh_locker, links, __db_locker);
		}
		F_CLR(sh_locker, DB_LOCKER_FREE);
		if (sh_locker->mtx_locker == MUTEX_INVALID) {
			if ((ret = __mutex_alloc(env, MTX_LOGICAL_LOCK,
			    DB_MUTEX_LOGICAL_LOCK | DB_MUTEX_SELF_BLOCK,
			    &sh_locker->mtx_locker)) != 0) {
				SH_TAILQ_INSERT_HEAD(&region->free_lockers,
				    sh_locker, links, __db_locker);
				return (ret);
			}
			MUTEX_LOCK_NO_CTR(env, sh_locker->mtx_locker);
		}

		++region->nlockers;
#ifdef HAVE_STATISTICS
		STAT_PERFMON2(env, lock, nlockers, region->nlockers, locker);
		if (region->nlockers > region->stat.st_maxnlockers)
			STAT_SET(env, lock, maxnlockers,
			    region->stat.st_maxnlockers,
			    region->nlockers, locker);
#endif

		sh_locker->id = locker;
		env->dbenv->thread_id(
		    env->dbenv, &sh_locker->pid, &sh_locker->tid);
		sh_locker->dd_id = 0;
		sh_locker->master_locker = INVALID_ROFF;
		sh_locker->parent_locker = INVALID_ROFF;
		SH_LIST_INIT(&sh_locker->child_locker);
		sh_locker->flags = 0;
		SH_LIST_INIT(&sh_locker->heldby);
		sh_locker->nlocks = 0;
		sh_locker->nwrites = 0;
		sh_locker->priority = DB_LOCK_DEFPRIORITY;
		sh_locker->lk_timeout = 0;
		timespecclear(&sh_locker->tx_expire);
		timespecclear(&sh_locker->lk_expire);

		SH_TAILQ_INSERT_HEAD(
		    &lt->locker_tab[indx], sh_locker, links, __db_locker);
		SH_TAILQ_INSERT_HEAD(&region->lockers,
		    sh_locker, ulinks, __db_locker);

		if (ip != NULL && ip->dbth_local_locker == INVALID_ROFF)
			ip->dbth_local_locker =
			    R_OFFSET(&lt->reginfo, sh_locker);
#ifdef DIAGNOSTIC
		/*
		 * __db_has_pagelock checks for proper locking by dbth_locker.
		 */
		if ((diag = ip) == NULL)
			ENV_GET_THREAD_INFO(env, diag);
		if (diag != NULL) {
			sh_locker->prev_locker = diag->dbth_locker;
			diag->dbth_locker = R_OFFSET(&lt->reginfo, sh_locker);
		}
#endif
	}

	*retp = sh_locker;
	return (0);
}
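
/*
 * The growth policy above is a geometric back-off: request roughly a 25%
 * extension of the locker table, then halve the request each time the
 * region allocation fails.  For example (numbers illustrative), with
 * st_lockers == 1000 the attempts are 250, 125, 62, ... down to 1 before
 * giving up and returning __lock_nomem().
 */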

/*
 * __lock_addfamilylocker --
 *	Put a locker entry in for a child transaction.
 *
 * PUBLIC: int __lock_addfamilylocker __P((ENV *,
 * PUBLIC:     u_int32_t, u_int32_t, u_int32_t));
 */
int
__lock_addfamilylocker(env, pid, id, is_family)
	ENV *env;
	u_int32_t pid, id, is_family;
{
	DB_LOCKER *lockerp, *mlockerp;
	DB_LOCKREGION *region;
	DB_LOCKTAB *lt;
	int ret;

	COMPQUIET(region, NULL);
	lt = env->lk_handle;
	region = lt->reginfo.primary;
	LOCK_LOCKERS(env, region);

	/* Get/create the parent locker info. */
	if ((ret = __lock_getlocker_int(lt, pid, 1, NULL, &mlockerp)) != 0)
		goto err;

	/*
	 * We assume that only one thread can manipulate a single transaction
	 * family.  Therefore the master locker cannot go away while we
	 * manipulate it, nor can another child in the family be created at
	 * the same time.
	 */
	if ((ret = __lock_getlocker_int(lt, id, 1, NULL, &lockerp)) != 0)
		goto err;

	/* Point to our parent. */
	lockerp->parent_locker = R_OFFSET(&lt->reginfo, mlockerp);

	/* See if this locker is the family master. */
	if (mlockerp->master_locker == INVALID_ROFF)
		lockerp->master_locker = R_OFFSET(&lt->reginfo, mlockerp);
	else {
		lockerp->master_locker = mlockerp->master_locker;
		mlockerp = R_ADDR(&lt->reginfo, mlockerp->master_locker);
	}

	/*
	 * Set the family locker flag, so it is possible to distinguish
	 * between locks held by subtransactions and those with compatible
	 * lockers.
	 */
	if (is_family)
		F_SET(mlockerp, DB_LOCKER_FAMILY_LOCKER);

	/*
	 * Link the child at the head of the master's list.  The guess is
	 * that when looking for deadlock, the most recent child is the one
	 * that's blocked.
	 */
	SH_LIST_INSERT_HEAD(
	    &mlockerp->child_locker, lockerp, child_link, __db_locker);

err:	UNLOCK_LOCKERS(env, region);

	return (ret);
}
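
/*
 * The resulting shape of a transaction family (a sketch; M is the master,
 * C1..Cn are children, most recently added first):
 *
 *	M.master_locker  = INVALID_ROFF
 *	M.child_locker   -> Cn -> ... -> C1
 *	Ci.parent_locker = its immediate parent (M or another child)
 *	Ci.master_locker = M, for every child in the family
 */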

/*
 * __lock_freelocker_int --
 *	Common code for deleting a locker; must be called with the
 *	lockers mutex locked.
 */
static int
__lock_freelocker_int(lt, region, sh_locker, reallyfree)
	DB_LOCKTAB *lt;
	DB_LOCKREGION *region;
	DB_LOCKER *sh_locker;
	int reallyfree;
{
	DB_MSGBUF mb;
	DB_THREAD_INFO *ip;
	ENV *env;
	u_int32_t indx;
	int ret;

	env = lt->env;
	if (!SH_LIST_EMPTY(&sh_locker->heldby)) {
		ret = USR_ERR(env, EINVAL);
		__db_errx(env,
		    DB_STR_A("2060", "Freeing locker %x with locks", "%x"),
		    sh_locker->id);
		DB_MSGBUF_INIT(&mb);
		(void)__lock_dump_locker(env, &mb, lt, sh_locker);
		DB_MSGBUF_FLUSH(env, &mb);
		return (ret);
	}

	/* If this is part of a family, we must fix up its links. */
	if (sh_locker->master_locker != INVALID_ROFF) {
		SH_LIST_REMOVE(sh_locker, child_link, __db_locker);
		sh_locker->master_locker = INVALID_ROFF;
	}
	sh_locker->parent_locker = INVALID_ROFF;

	if (reallyfree) {
		LOCKER_HASH(lt, region, sh_locker->id, indx);
		SH_TAILQ_REMOVE(&lt->locker_tab[indx], sh_locker,
		    links, __db_locker);
		SH_TAILQ_REMOVE(&region->lockers, sh_locker,
		    ulinks, __db_locker);
		region->nlockers--;
		STAT_PERFMON2(env,
		    lock, nlockers, region->nlockers, sh_locker->id);
		/*
		 * If this locker is cached in the thread info, zero the id
		 * and leave it allocated.  Otherwise, put it back on the
		 * free list.
		 */
		ENV_GET_THREAD_INFO(env, ip);
		if (ip != NULL && ip->dbth_local_locker ==
		    R_OFFSET(&lt->reginfo, sh_locker)) {
			DB_ASSERT(env,
			    MUTEX_IS_BUSY(env, sh_locker->mtx_locker));
			sh_locker->id = DB_LOCK_INVALIDID;
		} else {
			if (sh_locker->mtx_locker != MUTEX_INVALID && (ret =
			    __mutex_free(env, &sh_locker->mtx_locker)) != 0)
				return (ret);
			F_SET(sh_locker, DB_LOCKER_FREE);
			SH_TAILQ_INSERT_HEAD(&region->free_lockers, sh_locker,
			    links, __db_locker);
		}
	}

	return (0);
}
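
/*
 * A note on the reallyfree flag (a sketch of the two call patterns):
 * passing 0 only detaches the locker from its family, which is what
 * __lock_familyremove() below does for child lockers that will still
 * release locks later; passing 1 also removes the locker from the hash
 * table and either parks it in the thread info cache or returns it to
 * the region's free list.
 */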

/*
 * __lock_freelocker --
 *	Remove a locker and its family relationships from the hash table.
 *
 *	This must be called without the lockers mutex locked.
 *
 * PUBLIC: int __lock_freelocker __P((DB_LOCKTAB *, DB_LOCKER *));
 */
int
__lock_freelocker(lt, sh_locker)
	DB_LOCKTAB *lt;
	DB_LOCKER *sh_locker;
{
	DB_LOCKREGION *region;
	ENV *env;
	int ret;

	region = lt->reginfo.primary;
	env = lt->env;

	if (sh_locker == NULL)
		return (0);

	LOCK_LOCKERS(env, region);
	ret = __lock_freelocker_int(lt, region, sh_locker, 1);
	UNLOCK_LOCKERS(env, region);

	return (ret);
}

/*
 * __lock_familyremove --
 *	Remove a locker from its family.
 *
 *	This must be called without the locker bucket locked.
 *
 * PUBLIC: int __lock_familyremove __P((DB_LOCKTAB *, DB_LOCKER *));
 */
int
__lock_familyremove(lt, sh_locker)
	DB_LOCKTAB *lt;
	DB_LOCKER *sh_locker;
{
	DB_LOCKREGION *region;
	ENV *env;
	int ret;

	region = lt->reginfo.primary;
	env = lt->env;

	LOCK_LOCKERS(env, region);
	ret = __lock_freelocker_int(lt, region, sh_locker, 0);
	UNLOCK_LOCKERS(env, region);

	return (ret);
}

/*
 * __lock_local_locker_invalidate --
 *	Search the thread info table's cached lockers and discard any
 *	reference to this mutex.
 *
 * PUBLIC: int __lock_local_locker_invalidate __P((ENV *, db_mutex_t));
 */
int
__lock_local_locker_invalidate(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	DB_HASHTAB *htab;
	DB_LOCKER *locker;
	DB_THREAD_INFO *ip;
	u_int32_t i;
	char buf[DB_THREADID_STRLEN];

	htab = env->thr_hashtab;
	for (i = 0; i < env->thr_nbucket; i++) {
		SH_TAILQ_FOREACH(ip, &htab[i], dbth_links, __db_thread_info) {
			if (ip->dbth_local_locker == INVALID_ROFF)
				continue;
			locker = (DB_LOCKER *)R_ADDR(&env->lk_handle->reginfo,
			    ip->dbth_local_locker);
			if (locker->mtx_locker == mutex) {
				__db_msg(env, DB_STR_A("2061",
				    "Removing cached locker mutex %lu reference by %s",
				    "%lu %s"), (u_long)mutex,
				    env->dbenv->thread_id_string(env->dbenv,
				    locker->pid, locker->tid, buf));
				locker->mtx_locker = MUTEX_INVALID;
				return (0);
			}
		}
	}
	return (0);
}