/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "lib/util/debug.h"
#include "lib/util/talloc_stack.h"
#include "lib/util/samba_util.h"
#include "lib/util_path.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "dbwrap/dbwrap_watch.h"
#include "g_lock.h"
#include "util_tdb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
#include "serverid.h"

struct g_lock_ctx {
        struct db_context *db;
        struct messaging_context *msg;
};

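/*
 * In-memory view of a g_lock.tdb record: at most one exclusive holder,
 * any number of shared holders, plus opaque user data attached via
 * g_lock_write_data(). An unset exclusive holder is encoded as pid 0.
 */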
struct g_lock {
        struct server_id exclusive;
        size_t num_shared;
        uint8_t *shared;
        size_t datalen;
        uint8_t *data;
};

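/*
 * Parse a record into "lck". The on-disk layout is: the exclusive
 * holder's server_id (SERVER_ID_BUF_LENGTH bytes), a uint32 count of
 * shared holders, that many packed server_ids, and finally the user
 * data. A record too short to carry the fixed header is treated as an
 * empty (unlocked) record. "lck->shared" and "lck->data" point into
 * "buf"; no copies are made.
 */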
static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
{
        struct server_id exclusive;
        size_t num_shared, shared_len;

        if (buflen < (SERVER_ID_BUF_LENGTH + sizeof(uint32_t))) {
                *lck = (struct g_lock) { .exclusive.pid = 0 };
                return true;
        }

        server_id_get(&exclusive, buf);
        buf += SERVER_ID_BUF_LENGTH;
        buflen -= SERVER_ID_BUF_LENGTH;

        num_shared = IVAL(buf, 0);
        buf += sizeof(uint32_t);
        buflen -= sizeof(uint32_t);

        if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
                return false;
        }

        shared_len = num_shared * SERVER_ID_BUF_LENGTH;

        *lck = (struct g_lock) {
                .exclusive = exclusive,
                .num_shared = num_shared,
                .shared = buf,
                .datalen = buflen-shared_len,
                .data = buf+shared_len,
        };

        return true;
}

static void g_lock_get_shared(const struct g_lock *lck,
                              size_t i,
                              struct server_id *shared)
{
        if (i >= lck->num_shared) {
                abort();
        }
        server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
}

static void g_lock_del_shared(struct g_lock *lck, size_t i)
{
        if (i >= lck->num_shared) {
                abort();
        }
        lck->num_shared -= 1;
        if (i < lck->num_shared) {
                memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
                       lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
                       SERVER_ID_BUF_LENGTH);
        }
}

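/*
 * Serialize "lck" back into the record with a vectored store: exclusive
 * holder, shared count, the existing packed shared array, an optional
 * slot for one new shared holder, and the user data. Passing
 * "new_shared" appends that server_id without re-marshalling the
 * existing array.
 */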
static NTSTATUS g_lock_store(
        struct db_record *rec,
        struct g_lock *lck,
        struct server_id *new_shared)
{
        uint8_t exclusive[SERVER_ID_BUF_LENGTH];
        uint8_t sizebuf[sizeof(uint32_t)];
        uint8_t shared[SERVER_ID_BUF_LENGTH];

        struct TDB_DATA dbufs[] = {
                { .dptr = exclusive, .dsize = sizeof(exclusive) },
                { .dptr = sizebuf, .dsize = sizeof(sizebuf) },
                { .dptr = lck->shared,
                  .dsize = lck->num_shared * SERVER_ID_BUF_LENGTH },
                { 0 },
                { .dptr = lck->data, .dsize = lck->datalen }
        };

        server_id_put(exclusive, lck->exclusive);

        if (new_shared != NULL) {
                if (lck->num_shared >= UINT32_MAX) {
                        return NT_STATUS_BUFFER_OVERFLOW;
                }

                server_id_put(shared, *new_shared);

                dbufs[3] = (TDB_DATA) {
                        .dptr = shared, .dsize = sizeof(shared),
                };

                lck->num_shared += 1;
        }

        SIVAL(sizebuf, 0, lck->num_shared);

        return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
}

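/*
 * Create a g_lock context on top of an already opened backend
 * database. The backend is wrapped with dbwrap_watch so that waiters
 * can be woken via messaging when a record changes; it is passed as a
 * double pointer so the watched wrapper can take over the reference.
 */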
struct g_lock_ctx *g_lock_ctx_init_backend(
        TALLOC_CTX *mem_ctx,
        struct messaging_context *msg,
        struct db_context **backend)
{
        struct g_lock_ctx *result;

        result = talloc(mem_ctx, struct g_lock_ctx);
        if (result == NULL) {
                return NULL;
        }
        result->msg = msg;

        result->db = db_open_watched(result, backend, msg);
        if (result->db == NULL) {
                DBG_WARNING("db_open_watched failed\n");
                TALLOC_FREE(result);
                return NULL;
        }
        return result;
}

struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
                                   struct messaging_context *msg)
{
        char *db_path = NULL;
        struct db_context *backend = NULL;
        struct g_lock_ctx *ctx = NULL;

        db_path = lock_path(mem_ctx, "g_lock.tdb");
        if (db_path == NULL) {
                return NULL;
        }

        backend = db_open(
                mem_ctx,
                db_path,
                0,
                TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
                O_RDWR|O_CREAT,
                0600,
                DBWRAP_LOCK_ORDER_3,
                DBWRAP_FLAG_NONE);
        TALLOC_FREE(db_path);
        if (backend == NULL) {
                DBG_WARNING("Could not open g_lock.tdb\n");
                return NULL;
        }

        ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
        return ctx;
}

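/*
 * If the watch told us that the blocker we were waiting for died,
 * remove its stale entry: clear the exclusive holder if it matches,
 * and drop the first shared holder if that one matches. The record is
 * only stored back if something actually changed.
 */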
static NTSTATUS g_lock_cleanup_dead(
        struct db_record *rec,
        struct g_lock *lck,
        struct server_id *dead_blocker)
{
        bool modified = false;
        bool exclusive_died;
        NTSTATUS status = NT_STATUS_OK;
        struct server_id_buf tmp;

        if (dead_blocker == NULL) {
                return NT_STATUS_OK;
        }

        exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);

        if (exclusive_died) {
                DBG_DEBUG("Exclusive holder %s died\n",
                          server_id_str_buf(lck->exclusive, &tmp));
                lck->exclusive.pid = 0;
                modified = true;
        }

        if (lck->num_shared != 0) {
                bool shared_died;
                struct server_id shared;

                g_lock_get_shared(lck, 0, &shared);
                shared_died = server_id_equal(dead_blocker, &shared);

                if (shared_died) {
                        DBG_DEBUG("Shared holder %s died\n",
                                  server_id_str_buf(shared, &tmp));
                        g_lock_del_shared(lck, 0);
                        modified = true;
                }
        }

        if (modified) {
                status = g_lock_store(rec, lck, NULL);
                if (!NT_STATUS_IS_OK(status)) {
                        DBG_DEBUG("g_lock_store() failed: %s\n",
                                  nt_errstr(status));
                }
        }

        return status;
}

static ssize_t g_lock_find_shared(
        struct g_lock *lck,
        const struct server_id *self)
{
        size_t i;

        for (i=0; i<lck->num_shared; i++) {
                struct server_id shared;
                bool same;

                g_lock_get_shared(lck, i, &shared);

                same = server_id_equal(self, &shared);
                if (same) {
                        return i;
                }
        }

        return -1;
}

static void g_lock_cleanup_shared(struct g_lock *lck)
{
        size_t i;
        struct server_id check;
        bool exists;

        if (lck->num_shared == 0) {
                return;
        }

        /*
         * Read locks can stay around forever if the process dies. Do
         * a heuristic check for process existence: Check one random
         * process for existence. Hopefully this will keep runaway
         * read locks under control.
         */
        i = generate_random() % lck->num_shared;
        g_lock_get_shared(lck, i, &check);

        exists = serverid_exists(&check);
        if (!exists) {
                struct server_id_buf tmp;
                DBG_DEBUG("Shared locker %s died -- removing\n",
                          server_id_str_buf(check, &tmp));
                g_lock_del_shared(lck, i);
        }
}

struct g_lock_lock_state {
        struct tevent_context *ev;
        struct g_lock_ctx *ctx;
        TDB_DATA key;
        enum g_lock_type type;
        bool retry;
};

struct g_lock_lock_fn_state {
        struct g_lock_lock_state *req_state;
        struct server_id *dead_blocker;

        struct tevent_req *watch_req;
        NTSTATUS status;
};

static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);

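/*
 * Core locking logic, run under dbwrap_do_locked(). Returns
 * NT_STATUS_OK if the requested lock (or upgrade/downgrade) was
 * granted, NT_STATUS_LOCK_NOT_GRANTED with *blocker filled in if the
 * caller has to wait for that holder to release or die, or an error.
 * When we take the exclusive slot while shared holders still exist, a
 * destructor is armed on the request state so that an abandoned wait
 * releases the half-taken write lock again.
 */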
static NTSTATUS g_lock_trylock(
        struct db_record *rec,
        struct g_lock_lock_fn_state *state,
        TDB_DATA data,
        struct server_id *blocker)
{
        struct g_lock_lock_state *req_state = state->req_state;
        struct server_id self = messaging_server_id(req_state->ctx->msg);
        enum g_lock_type type = req_state->type;
        bool retry = req_state->retry;
        struct g_lock lck = { .exclusive.pid = 0 };
        struct server_id_buf tmp;
        NTSTATUS status;
        bool ok;

        ok = g_lock_parse(data.dptr, data.dsize, &lck);
        if (!ok) {
                DBG_DEBUG("g_lock_parse failed\n");
                return NT_STATUS_INTERNAL_DB_CORRUPTION;
        }

        status = g_lock_cleanup_dead(rec, &lck, state->dead_blocker);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("g_lock_cleanup_dead() failed: %s\n",
                          nt_errstr(status));
                return status;
        }

        if (lck.exclusive.pid != 0) {
                bool self_exclusive = server_id_equal(&self, &lck.exclusive);

                if (!self_exclusive) {
                        bool exists = serverid_exists(&lck.exclusive);
                        if (!exists) {
                                lck.exclusive = (struct server_id) { .pid=0 };
                                goto noexclusive;
                        }

                        DBG_DEBUG("%s has an exclusive lock\n",
                                  server_id_str_buf(lck.exclusive, &tmp));

                        if (type == G_LOCK_DOWNGRADE) {
                                struct server_id_buf tmp2;
                                DBG_DEBUG("%s: Trying to downgrade %s\n",
                                          server_id_str_buf(self, &tmp),
                                          server_id_str_buf(
                                                  lck.exclusive, &tmp2));
                                return NT_STATUS_NOT_LOCKED;
                        }

                        if (type == G_LOCK_UPGRADE) {
                                ssize_t shared_idx;
                                shared_idx = g_lock_find_shared(&lck, &self);

                                if (shared_idx == -1) {
                                        DBG_DEBUG("Trying to upgrade %s "
                                                  "without "
                                                  "existing shared lock\n",
                                                  server_id_str_buf(
                                                          self, &tmp));
                                        return NT_STATUS_NOT_LOCKED;
                                }

                                /*
                                 * We're trying to upgrade, and the
                                 * exclusive lock is taken by someone
                                 * else. This means that someone else
                                 * is waiting for us to give up our
                                 * shared lock. If we now also wait
                                 * for someone to give their shared
                                 * lock, we will deadlock.
                                 */

                                DBG_DEBUG("Trying to upgrade %s while "
                                          "someone else is also "
                                          "trying to upgrade\n",
                                          server_id_str_buf(self, &tmp));
                                return NT_STATUS_POSSIBLE_DEADLOCK;
                        }

                        *blocker = lck.exclusive;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }

                if (type == G_LOCK_DOWNGRADE) {
                        DBG_DEBUG("Downgrading %s from WRITE to READ\n",
                                  server_id_str_buf(self, &tmp));

                        lck.exclusive = (struct server_id) { .pid = 0 };
                        goto do_shared;
                }

                if (!retry) {
                        DBG_DEBUG("%s already locked by self\n",
                                  server_id_str_buf(self, &tmp));
                        return NT_STATUS_WAS_LOCKED;
                }

                if (lck.num_shared != 0) {
                        g_lock_get_shared(&lck, 0, blocker);

                        DBG_DEBUG("Continue waiting for shared lock %s\n",
                                  server_id_str_buf(*blocker, &tmp));

                        return NT_STATUS_LOCK_NOT_GRANTED;
                }

                talloc_set_destructor(req_state, NULL);

                /*
                 * Retry after a conflicting lock was released
                 */
                return NT_STATUS_OK;
        }

noexclusive:

        if (type == G_LOCK_UPGRADE) {
                ssize_t shared_idx = g_lock_find_shared(&lck, &self);

                if (shared_idx == -1) {
                        DBG_DEBUG("Trying to upgrade %s without "
                                  "existing shared lock\n",
                                  server_id_str_buf(self, &tmp));
                        return NT_STATUS_NOT_LOCKED;
                }

                g_lock_del_shared(&lck, shared_idx);
                type = G_LOCK_WRITE;
        }

        if (type == G_LOCK_WRITE) {
                ssize_t shared_idx = g_lock_find_shared(&lck, &self);

                if (shared_idx != -1) {
                        DBG_DEBUG("Trying to writelock existing shared %s\n",
                                  server_id_str_buf(self, &tmp));
                        return NT_STATUS_WAS_LOCKED;
                }

                lck.exclusive = self;

                status = g_lock_store(rec, &lck, NULL);
                if (!NT_STATUS_IS_OK(status)) {
                        DBG_DEBUG("g_lock_store() failed: %s\n",
                                  nt_errstr(status));
                        return status;
                }

                if (lck.num_shared != 0) {
                        talloc_set_destructor(
                                req_state, g_lock_lock_state_destructor);

                        g_lock_get_shared(&lck, 0, blocker);

                        DBG_DEBUG("Waiting for %zu shared locks, "
                                  "picking blocker %s\n",
                                  lck.num_shared,
                                  server_id_str_buf(*blocker, &tmp));

                        return NT_STATUS_LOCK_NOT_GRANTED;
                }

                talloc_set_destructor(req_state, NULL);

                return NT_STATUS_OK;
        }

do_shared:

        if (lck.num_shared == 0) {
                status = g_lock_store(rec, &lck, &self);
                if (!NT_STATUS_IS_OK(status)) {
                        DBG_DEBUG("g_lock_store() failed: %s\n",
                                  nt_errstr(status));
                }

                return status;
        }

        g_lock_cleanup_shared(&lck);

        status = g_lock_store(rec, &lck, &self);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("g_lock_store() failed: %s\n",
                          nt_errstr(status));
                return status;
        }

        return NT_STATUS_OK;
}

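/*
 * dbwrap_do_locked() callback: run g_lock_trylock() and, if we have to
 * wait, arm a dbwrap watch on the record so we get woken up when the
 * blocker changes the record or dies.
 */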
static void g_lock_lock_fn(
        struct db_record *rec,
        TDB_DATA value,
        void *private_data)
{
        struct g_lock_lock_fn_state *state = private_data;
        struct server_id blocker = {0};

        state->status = g_lock_trylock(rec, state, value, &blocker);
        if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
                return;
        }

        state->watch_req = dbwrap_watched_watch_send(
                state->req_state, state->req_state->ev, rec, blocker);
        if (state->watch_req == NULL) {
                state->status = NT_STATUS_NO_MEMORY;
        }
}

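/*
 * Destructor armed by g_lock_trylock() once we hold the exclusive slot
 * but are still waiting for shared holders to drain: if the pending
 * request is freed before the lock was fully granted, give the
 * exclusive slot back via g_lock_unlock().
 */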
static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
{
        NTSTATUS status = g_lock_unlock(s->ctx, s->key);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
        }
        return 0;
}

static void g_lock_lock_retry(struct tevent_req *subreq);

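/*
 * Async entry point: try to take the lock immediately; if it is
 * contended, keep a watch pending on the record and retry from
 * g_lock_lock_retry() whenever the watch fires. Each pending watch
 * also gets a randomized 5-9 second timeout, presumably as a safety
 * net against missed wakeups.
 */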
struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
                                    struct tevent_context *ev,
                                    struct g_lock_ctx *ctx,
                                    TDB_DATA key,
                                    enum g_lock_type type)
{
        struct tevent_req *req;
        struct g_lock_lock_state *state;
        struct g_lock_lock_fn_state fn_state;
        NTSTATUS status;
        bool ok;

        req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->ctx = ctx;
        state->key = key;
        state->type = type;

        fn_state = (struct g_lock_lock_fn_state) {
                .req_state = state,
        };

        status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
        if (tevent_req_nterror(req, status)) {
                DBG_DEBUG("dbwrap_do_locked failed: %s\n",
                          nt_errstr(status));
                return tevent_req_post(req, ev);
        }

        if (NT_STATUS_IS_OK(fn_state.status)) {
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }
        if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
                tevent_req_nterror(req, fn_state.status);
                return tevent_req_post(req, ev);
        }

        if (tevent_req_nomem(fn_state.watch_req, req)) {
                return tevent_req_post(req, ev);
        }

        ok = tevent_req_set_endtime(
                fn_state.watch_req,
                state->ev,
                timeval_current_ofs(5 + generate_random() % 5, 0));
        if (!ok) {
                tevent_req_oom(req);
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);

        return req;
}

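/*
 * A pending watch fired (or timed out): re-run the trylock logic,
 * treating a watch timeout like a normal wakeup. If the blocker is
 * known to have died, pass it down so its stale entry gets cleaned up.
 */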
static void g_lock_lock_retry(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct g_lock_lock_state *state = tevent_req_data(
                req, struct g_lock_lock_state);
        struct g_lock_lock_fn_state fn_state;
        struct server_id blocker = { .pid = 0 };
        bool blockerdead = false;
        NTSTATUS status;

        status = dbwrap_watched_watch_recv(subreq, &blockerdead, &blocker);
        DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
        TALLOC_FREE(subreq);

        if (!NT_STATUS_IS_OK(status) &&
            !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
                tevent_req_nterror(req, status);
                return;
        }

        state->retry = true;

        fn_state = (struct g_lock_lock_fn_state) {
                .req_state = state,
                .dead_blocker = blockerdead ? &blocker : NULL,
        };

        status = dbwrap_do_locked(state->ctx->db, state->key,
                                  g_lock_lock_fn, &fn_state);
        if (tevent_req_nterror(req, status)) {
                DBG_DEBUG("dbwrap_do_locked failed: %s\n",
                          nt_errstr(status));
                return;
        }

        if (NT_STATUS_IS_OK(fn_state.status)) {
                tevent_req_done(req);
                return;
        }
        if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
                tevent_req_nterror(req, fn_state.status);
                return;
        }

        if (tevent_req_nomem(fn_state.watch_req, req)) {
                return;
        }

        if (!tevent_req_set_endtime(
                    fn_state.watch_req, state->ev,
                    timeval_current_ofs(5 + generate_random() % 5, 0))) {
                return;
        }
        tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
}

NTSTATUS g_lock_lock_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_ntstatus(req);
}

struct g_lock_lock_simple_state {
        struct server_id me;
        enum g_lock_type type;
        NTSTATUS status;
};

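/*
 * Fast path for plain READ/WRITE requests: grab the lock in a single
 * dbwrap_do_locked() round trip if there is no conflicting holder,
 * otherwise report NT_STATUS_LOCK_NOT_GRANTED and let g_lock_lock()
 * fall back to the full async machinery above.
 */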
static void g_lock_lock_simple_fn(
        struct db_record *rec,
        TDB_DATA value,
        void *private_data)
{
        struct g_lock_lock_simple_state *state = private_data;
        struct g_lock lck = { .exclusive.pid = 0 };
        bool ok;

        ok = g_lock_parse(value.dptr, value.dsize, &lck);
        if (!ok) {
                DBG_DEBUG("g_lock_parse failed\n");
                state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
                return;
        }

        if (lck.exclusive.pid != 0) {
                goto not_granted;
        }

        if (state->type == G_LOCK_WRITE) {
                if (lck.num_shared != 0) {
                        goto not_granted;
                }
                lck.exclusive = state->me;
                state->status = g_lock_store(rec, &lck, NULL);
                return;
        }

        if (state->type == G_LOCK_READ) {
                g_lock_cleanup_shared(&lck);
                state->status = g_lock_store(rec, &lck, &state->me);
                return;
        }

not_granted:
        state->status = NT_STATUS_LOCK_NOT_GRANTED;
}

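/*
 * Synchronous wrapper: take "key" with the requested lock type,
 * waiting at most "timeout" for contention to clear.
 *
 * A minimal usage sketch (the key name is made up for illustration):
 *
 *      TDB_DATA key = string_term_tdb_data("example-lock");
 *      status = g_lock_lock(ctx, key, G_LOCK_WRITE, timeval_set(10, 0));
 *      if (NT_STATUS_IS_OK(status)) {
 *              ... do work while holding the lock ...
 *              g_lock_unlock(ctx, key);
 *      }
 */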
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
                     enum g_lock_type type, struct timeval timeout)
{
        TALLOC_CTX *frame;
        struct tevent_context *ev;
        struct tevent_req *req;
        struct timeval end;
        NTSTATUS status;

        if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
                /*
                 * This is an abstraction violation: Normally we do
                 * the sync wrappers around async functions with full
                 * nested event contexts. However, this is used in
                 * very hot code paths, so avoid the event context
                 * creation for the good path where there's no lock
                 * contention. My benchmark gave a factor of 2
                 * improvement for lock/unlock.
                 */
                struct g_lock_lock_simple_state state = {
                        .me = messaging_server_id(ctx->msg),
                        .type = type,
                };
                status = dbwrap_do_locked(
                        ctx->db, key, g_lock_lock_simple_fn, &state);
                if (!NT_STATUS_IS_OK(status)) {
                        DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
                                  nt_errstr(status));
                        return status;
                }
                if (NT_STATUS_IS_OK(state.status)) {
                        return NT_STATUS_OK;
                }
                if (!NT_STATUS_EQUAL(
                            state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
                        return state.status;
                }

                /*
                 * Fall back to the full g_lock_trylock logic,
                 * g_lock_lock_simple_fn() called above only covers
                 * the uncontended path.
                 */
        }

        frame = talloc_stackframe();
        status = NT_STATUS_NO_MEMORY;

        ev = samba_tevent_context_init(frame);
        if (ev == NULL) {
                goto fail;
        }
        req = g_lock_lock_send(frame, ev, ctx, key, type);
        if (req == NULL) {
                goto fail;
        }
        end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
        if (!tevent_req_set_endtime(req, ev, end)) {
                goto fail;
        }
        if (!tevent_req_poll_ntstatus(req, ev, &status)) {
                goto fail;
        }
        status = g_lock_lock_recv(req);
fail:
        TALLOC_FREE(frame);
        return status;
}

struct g_lock_unlock_state {
        struct server_id self;
        NTSTATUS status;
};

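/*
 * Drop whichever lock we hold on the record: remove our shared entry
 * if present, or clear the exclusive slot if we are the exclusive
 * holder. A record that ends up with no holders and no user data is
 * deleted outright.
 */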
static void g_lock_unlock_fn(
        struct db_record *rec,
        TDB_DATA value,
        void *private_data)
{
        struct g_lock_unlock_state *state = private_data;
        struct server_id_buf tmp;
        struct g_lock lck;
        size_t i;
        bool ok, exclusive;

        ok = g_lock_parse(value.dptr, value.dsize, &lck);
        if (!ok) {
                DBG_DEBUG("g_lock_parse() failed\n");
                state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
                return;
        }

        exclusive = server_id_equal(&state->self, &lck.exclusive);

        for (i=0; i<lck.num_shared; i++) {
                struct server_id shared;
                g_lock_get_shared(&lck, i, &shared);
                if (server_id_equal(&state->self, &shared)) {
                        break;
                }
        }

        if (i < lck.num_shared) {
                if (exclusive) {
                        DBG_DEBUG("%s both exclusive and shared (%zu)\n",
                                  server_id_str_buf(state->self, &tmp),
                                  i);
                        state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
                        return;
                }
                g_lock_del_shared(&lck, i);
        } else {
                if (!exclusive) {
                        DBG_DEBUG("Lock %s not found, num_rec=%zu\n",
                                  server_id_str_buf(state->self, &tmp),
                                  lck.num_shared);
                        state->status = NT_STATUS_NOT_FOUND;
                        return;
                }
                lck.exclusive = (struct server_id) { .pid = 0 };
        }

        if ((lck.exclusive.pid == 0) &&
            (lck.num_shared == 0) &&
            (lck.datalen == 0)) {
                state->status = dbwrap_record_delete(rec);
                return;
        }

        state->status = g_lock_store(rec, &lck, NULL);
}

NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
{
        struct g_lock_unlock_state state = {
                .self = messaging_server_id(ctx->msg),
        };
        NTSTATUS status;

        status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_WARNING("dbwrap_do_locked failed: %s\n",
                            nt_errstr(status));
                return status;
        }
        if (!NT_STATUS_IS_OK(state.status)) {
                DBG_WARNING("g_lock_unlock_fn failed: %s\n",
                            nt_errstr(state.status));
                return state.status;
        }

        return NT_STATUS_OK;
}

struct g_lock_write_data_state {
        TDB_DATA key;
        struct server_id self;
        const uint8_t *data;
        size_t datalen;
        NTSTATUS status;
};

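/*
 * Replace the user data attached to the record. Only the exclusive
 * holder may do this, and only once the write lock is fully granted,
 * i.e. no shared holders are left.
 */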
static void g_lock_write_data_fn(
        struct db_record *rec,
        TDB_DATA value,
        void *private_data)
{
        struct g_lock_write_data_state *state = private_data;
        struct g_lock lck;
        bool exclusive;
        bool ok;

        ok = g_lock_parse(value.dptr, value.dsize, &lck);
        if (!ok) {
                DBG_DEBUG("g_lock_parse for %s failed\n",
                          hex_encode_talloc(talloc_tos(),
                                            state->key.dptr,
                                            state->key.dsize));
                state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
                return;
        }

        exclusive = server_id_equal(&state->self, &lck.exclusive);

        /*
         * Make sure we're really exclusive: we are already marked as
         * the exclusive holder while still waiting for shared holders
         * to drain, so additionally require that no shared locks
         * remain.
         */
        exclusive &= (lck.num_shared == 0);

        if (!exclusive) {
                DBG_DEBUG("Not locked by us\n");
                state->status = NT_STATUS_NOT_LOCKED;
                return;
        }

        lck.data = discard_const_p(uint8_t, state->data);
        lck.datalen = state->datalen;
        state->status = g_lock_store(rec, &lck, NULL);
}

NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
                           const uint8_t *buf, size_t buflen)
{
        struct g_lock_write_data_state state = {
                .key = key, .self = messaging_server_id(ctx->msg),
                .data = buf, .datalen = buflen
        };
        NTSTATUS status;

        status = dbwrap_do_locked(ctx->db, key,
                                  g_lock_write_data_fn, &state);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_WARNING("dbwrap_do_locked failed: %s\n",
                            nt_errstr(status));
                return status;
        }
        if (!NT_STATUS_IS_OK(state.status)) {
                DBG_WARNING("g_lock_write_data_fn failed: %s\n",
                            nt_errstr(state.status));
                return state.status;
        }

        return NT_STATUS_OK;
}

struct g_lock_locks_state {
        int (*fn)(TDB_DATA key, void *private_data);
        void *private_data;
};

static int g_lock_locks_fn(struct db_record *rec, void *priv)
{
        TDB_DATA key;
        struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;

        key = dbwrap_record_get_key(rec);
        return state->fn(key, state->private_data);
}

int g_lock_locks(struct g_lock_ctx *ctx,
                 int (*fn)(TDB_DATA key, void *private_data),
                 void *private_data)
{
        struct g_lock_locks_state state;
        NTSTATUS status;
        int count;

        state.fn = fn;
        state.private_data = private_data;

        status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
        if (!NT_STATUS_IS_OK(status)) {
                return -1;
        }
        return count;
}

struct g_lock_dump_state {
        TALLOC_CTX *mem_ctx;
        TDB_DATA key;
        void (*fn)(struct server_id exclusive,
                   size_t num_shared,
                   struct server_id *shared,
                   const uint8_t *data,
                   size_t datalen,
                   void *private_data);
        void *private_data;
        NTSTATUS status;
};

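/*
 * Parse one record and hand the unpacked holders and user data to the
 * caller's callback. The shared holders are copied into a temporary
 * talloc array because the record stores them in packed wire format.
 */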
static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
                           void *private_data)
{
        struct g_lock_dump_state *state = private_data;
        struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
        struct server_id *shared = NULL;
        size_t i;
        bool ok;

        ok = g_lock_parse(data.dptr, data.dsize, &lck);
        if (!ok) {
                DBG_DEBUG("g_lock_parse failed for %s\n",
                          hex_encode_talloc(talloc_tos(),
                                            state->key.dptr,
                                            state->key.dsize));
                state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
                return;
        }

        shared = talloc_array(
                state->mem_ctx, struct server_id, lck.num_shared);
        if (shared == NULL) {
                DBG_DEBUG("talloc failed\n");
                state->status = NT_STATUS_NO_MEMORY;
                return;
        }

        for (i=0; i<lck.num_shared; i++) {
                g_lock_get_shared(&lck, i, &shared[i]);
        }

        state->fn(lck.exclusive,
                  lck.num_shared,
                  shared,
                  lck.data,
                  lck.datalen,
                  state->private_data);

        TALLOC_FREE(shared);

        state->status = NT_STATUS_OK;
}

NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
                     void (*fn)(struct server_id exclusive,
                                size_t num_shared,
                                struct server_id *shared,
                                const uint8_t *data,
                                size_t datalen,
                                void *private_data),
                     void *private_data)
{
        struct g_lock_dump_state state = {
                .mem_ctx = ctx, .key = key,
                .fn = fn, .private_data = private_data
        };
        NTSTATUS status;

        status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
        if (!NT_STATUS_IS_OK(status)) {
                DBG_DEBUG("dbwrap_parse_record returned %s\n",
                          nt_errstr(status));
                return status;
        }
        if (!NT_STATUS_IS_OK(state.status)) {
                DBG_DEBUG("g_lock_dump_fn returned %s\n",
                          nt_errstr(state.status));
                return state.status;
        }
        return NT_STATUS_OK;
}