/*	$OpenBSD: namespace.c,v 1.16 2016/02/04 12:48:06 jca Exp $ */

/*
 * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>

#include "ldapd.h"

extern const char	*datadir;

/* Maximum number of requests to queue per namespace during compaction.
 * After this many requests, we return LDAP_BUSY.
 */
#define MAX_REQUEST_QUEUE	 10000

static struct btval	*namespace_find(struct namespace *ns, char *dn);
static void		 namespace_queue_replay(int fd, short event, void *arg);
static int		 namespace_set_fd(struct namespace *ns,
			    struct btree **bt, int fd, unsigned int flags);

int
namespace_begin_txn(struct namespace *ns, struct btree_txn **data_txn,
    struct btree_txn **indx_txn, int rdonly)
{
	if (ns->data_db == NULL || ns->indx_db == NULL) {
		errno = EBUSY;		/* namespace is being reopened */
		return -1;
	}

	if ((*data_txn = btree_txn_begin(ns->data_db, rdonly)) == NULL ||
	    (*indx_txn = btree_txn_begin(ns->indx_db, rdonly)) == NULL) {
		if (errno == ESTALE) {
			if (*data_txn == NULL)
				namespace_reopen_data(ns);
			else
				namespace_reopen_indx(ns);
			errno = EBUSY;
		}
		log_warn("failed to open transaction");
		btree_txn_abort(*data_txn);
		*data_txn = NULL;
		return -1;
	}

	return 0;
}

int
namespace_begin(struct namespace *ns)
{
	return namespace_begin_txn(ns, &ns->data_txn, &ns->indx_txn, 0);
}

int
namespace_commit(struct namespace *ns)
{
	if (ns->indx_txn != NULL &&
	    btree_txn_commit(ns->indx_txn) != BT_SUCCESS) {
		log_warn("%s(indx): commit failed", ns->suffix);
		btree_txn_abort(ns->data_txn);
		ns->indx_txn = ns->data_txn = NULL;
		return -1;
	}
	ns->indx_txn = NULL;

	if (ns->data_txn != NULL &&
	    btree_txn_commit(ns->data_txn) != BT_SUCCESS) {
		log_warn("%s(data): commit failed", ns->suffix);
		ns->data_txn = NULL;
		return -1;
	}
	ns->data_txn = NULL;

	return 0;
}

void
namespace_abort(struct namespace *ns)
{
	btree_txn_abort(ns->data_txn);
	ns->data_txn = NULL;

	btree_txn_abort(ns->indx_txn);
	ns->indx_txn = NULL;
}

int
namespace_open(struct namespace *ns)
{
	unsigned int	 db_flags = 0;

	assert(ns);
	assert(ns->suffix);

	if (ns->sync == 0)
		db_flags |= BT_NOSYNC;

	if (asprintf(&ns->data_path, "%s/%s_data.db", datadir, ns->suffix) < 0)
		return -1;
	log_info("opening namespace %s", ns->suffix);
	ns->data_db = btree_open(ns->data_path, db_flags | BT_REVERSEKEY, 0644);
	if (ns->data_db == NULL)
		return -1;

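	/* Apply the cache size configured for this namespace's data db. */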
	btree_set_cache_size(ns->data_db, ns->cache_size);

	if (asprintf(&ns->indx_path, "%s/%s_indx.db", datadir, ns->suffix) < 0)
		return -1;
	ns->indx_db = btree_open(ns->indx_path, db_flags, 0644);
	if (ns->indx_db == NULL)
		return -1;

	btree_set_cache_size(ns->indx_db, ns->index_cache_size);

	/* prepare request queue scheduler */
	evtimer_set(&ns->ev_queue, namespace_queue_replay, ns);

	return 0;
}

static int
namespace_reopen(const char *path)
{
	struct open_req		 req;

	log_debug("asking parent to open %s", path);

	memset(&req, 0, sizeof(req));
	if (strlcpy(req.path, path, sizeof(req.path)) >= sizeof(req.path)) {
		log_warnx("%s: path truncated", __func__);
		return -1;
	}

	return imsgev_compose(iev_ldapd, IMSG_LDAPD_OPEN, 0, 0, -1, &req,
	    sizeof(req));
}

int
namespace_reopen_data(struct namespace *ns)
{
	if (ns->data_db != NULL) {
		btree_close(ns->data_db);
		ns->data_db = NULL;
		return namespace_reopen(ns->data_path);
	}
	return 1;
}

int
namespace_reopen_indx(struct namespace *ns)
{
	if (ns->indx_db != NULL) {
		btree_close(ns->indx_db);
		ns->indx_db = NULL;
		return namespace_reopen(ns->indx_path);
	}
	return 1;
}

static int
namespace_set_fd(struct namespace *ns, struct btree **bt, int fd,
    unsigned int flags)
{
	log_info("reopening namespace %s (entries)", ns->suffix);
	btree_close(*bt);
	if (ns->sync == 0)
		flags |= BT_NOSYNC;
	*bt = btree_open_fd(fd, flags);
	if (*bt == NULL)
		return -1;
	return 0;
}

int
namespace_set_data_fd(struct namespace *ns, int fd)
{
	return namespace_set_fd(ns, &ns->data_db, fd, BT_REVERSEKEY);
}

int
namespace_set_indx_fd(struct namespace *ns, int fd)
{
	return namespace_set_fd(ns, &ns->indx_db, fd, 0);
}

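/* Tear down a namespace: fail any queued requests, close any searches
 * that reference it, then close both databases and free its resources.
 */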
void
namespace_close(struct namespace *ns)
{
	struct conn		*conn;
	struct search		*search, *next;
	struct request		*req;

	/* Cancel any queued requests for this namespace.
	 */
	if (ns->queued_requests > 0) {
		log_warnx("cancelling %u queued requests on namespace %s",
		    ns->queued_requests, ns->suffix);
		while ((req = TAILQ_FIRST(&ns->request_queue)) != NULL) {
			TAILQ_REMOVE(&ns->request_queue, req, next);
			ldap_respond(req, LDAP_UNAVAILABLE);
		}
	}

	/* Cancel any searches on this namespace.
	 */
	TAILQ_FOREACH(conn, &conn_list, next) {
		for (search = TAILQ_FIRST(&conn->searches); search != NULL;
		    search = next) {
			next = TAILQ_NEXT(search, next);
			if (search->ns == ns)
				search_close(search);
		}
	}

	free(ns->suffix);
	btree_close(ns->data_db);
	btree_close(ns->indx_db);
	if (evtimer_pending(&ns->ev_queue, NULL))
		evtimer_del(&ns->ev_queue);
	free(ns->data_path);
	free(ns->indx_path);
	free(ns);
}

void
namespace_remove(struct namespace *ns)
{
	TAILQ_REMOVE(&conf->namespaces, ns, next);
	namespace_close(ns);
}

static struct btval *
namespace_find(struct namespace *ns, char *dn)
{
	struct btval		 key;
	static struct btval	 val;

	if (ns->data_db == NULL) {
		errno = EBUSY;		/* namespace is being reopened */
		return NULL;
	}

	memset(&key, 0, sizeof(key));
	memset(&val, 0, sizeof(val));

	key.data = dn;
	key.size = strlen(dn);

	if (btree_txn_get(ns->data_db, ns->data_txn, &key, &val) != 0) {
		if (errno == ENOENT)
			log_debug("%s: dn not found", dn);
		else
			log_warn("%s", dn);

		if (errno == ESTALE)
			namespace_reopen_data(ns);

		return NULL;
	}

	return &val;
}

struct ber_element *
namespace_get(struct namespace *ns, char *dn)
{
	struct ber_element	*elm;
	struct btval		*val;

	if ((val = namespace_find(ns, dn)) == NULL)
		return NULL;

	elm = namespace_db2ber(ns, val);
	btval_reset(val);
	return elm;
}

int
namespace_exists(struct namespace *ns, char *dn)
{
	struct btval		*val;

	if ((val = namespace_find(ns, dn)) == NULL)
		return 0;
	btval_reset(val);
	return 1;
}

int
namespace_ber2db(struct namespace *ns, struct ber_element *root,
    struct btval *val)
{
	return ber2db(root, val, ns->compression_level);
}

struct ber_element *
namespace_db2ber(struct namespace *ns, struct btval *val)
{
	return db2ber(val, ns->compression_level);
}

static int
namespace_put(struct namespace *ns, char *dn, struct ber_element *root,
    int update)
{
	int			 rc;
	struct btval		 key, val;

	assert(ns != NULL);
	assert(ns->data_txn != NULL);
	assert(ns->indx_txn != NULL);

	memset(&key, 0, sizeof(key));
	key.data = dn;
	key.size = strlen(dn);

	if (namespace_ber2db(ns, root, &val) != 0)
		return BT_FAIL;

	rc = btree_txn_put(NULL, ns->data_txn, &key, &val,
	    update ? 0 : BT_NOOVERWRITE);
	if (rc != BT_SUCCESS) {
		if (errno == EEXIST)
			log_debug("%s: already exists", dn);
		else
			log_warn("%s", dn);
		goto done;
	}

	/* FIXME: if updating, try harder to just update changed indices.
	 */
	if (update && (rc = unindex_entry(ns, &key, root)) != BT_SUCCESS)
		goto done;

	rc = index_entry(ns, &key, root);

done:
	btval_reset(&val);
	return rc;
}

int
namespace_add(struct namespace *ns, char *dn, struct ber_element *root)
{
	return namespace_put(ns, dn, root, 0);
}

int
namespace_update(struct namespace *ns, char *dn, struct ber_element *root)
{
	return namespace_put(ns, dn, root, 1);
}

int
namespace_del(struct namespace *ns, char *dn)
{
	int			 rc;
	struct ber_element	*root;
	struct btval		 key, data;

	assert(ns != NULL);
	assert(ns->indx_txn != NULL);
	assert(ns->data_txn != NULL);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	key.data = dn;
	key.size = strlen(key.data);

	rc = btree_txn_del(NULL, ns->data_txn, &key, &data);
	if (rc == BT_SUCCESS && (root = namespace_db2ber(ns, &data)) != NULL)
		rc = unindex_entry(ns, &key, root);

	btval_reset(&data);
	return rc;
}

int
namespace_has_referrals(struct namespace *ns)
{
	return !SLIST_EMPTY(&ns->referrals);
}

struct namespace *
namespace_lookup_base(const char *basedn, int include_referrals)
{
	size_t			 blen, slen;
	struct namespace	*ns, *matched_ns = NULL;

	assert(basedn);
	blen = strlen(basedn);

	TAILQ_FOREACH(ns, &conf->namespaces, next) {
		slen = strlen(ns->suffix);
		if ((include_referrals || !namespace_has_referrals(ns)) &&
		    blen >= slen &&
		    bcmp(basedn + blen - slen, ns->suffix, slen) == 0) {
			/* Match the longest namespace suffix. */
			if (matched_ns == NULL ||
			    strlen(ns->suffix) > strlen(matched_ns->suffix))
				matched_ns = ns;
		}
	}

	return matched_ns;
}

struct namespace *
namespace_for_base(const char *basedn)
{
	return namespace_lookup_base(basedn, 0);
}

struct referrals *
namespace_referrals(const char *basedn)
{
	struct namespace	*ns;

	if ((ns = namespace_lookup_base(basedn, 1)) != NULL &&
	    namespace_has_referrals(ns))
		return &ns->referrals;

	if (!SLIST_EMPTY(&conf->referrals))
		return &conf->referrals;

	return NULL;
}

int
namespace_has_index(struct namespace *ns, const char *attr,
    enum index_type type)
{
	struct attr_index	*ai;

	assert(ns);
	assert(attr);
	TAILQ_FOREACH(ai, &ns->indices, next) {
		if (strcasecmp(attr, ai->attr) == 0 && ai->type == type)
			return 1;
	}

	return 0;
}

/* Queues modification requests while the namespace is being reopened.
 */
int
namespace_queue_request(struct namespace *ns, struct request *req)
{
	if (ns->queued_requests > MAX_REQUEST_QUEUE) {
		log_warn("%u requests already queued, sorry",
		    ns->queued_requests);
		return -1;
	}

	TAILQ_INSERT_TAIL(&ns->request_queue, req, next);
	ns->queued_requests++;

	if (!evtimer_pending(&ns->ev_queue, NULL))
		namespace_queue_schedule(ns, 250000);

	return 0;
}

static void
namespace_queue_replay(int fd, short event, void *data)
{
	struct namespace	*ns = data;
	struct request		*req;

	if (ns->data_db == NULL || ns->indx_db == NULL) {
		log_debug("%s: database is being reopened", ns->suffix);
		return;		/* Database is being reopened. */
	}

	if ((req = TAILQ_FIRST(&ns->request_queue)) == NULL)
		return;
	TAILQ_REMOVE(&ns->request_queue, req, next);

	log_debug("replaying queued request");
	req->replayed = 1;
	request_dispatch(req);
	ns->queued_requests--;

	if (!evtimer_pending(&ns->ev_queue, NULL))
		namespace_queue_schedule(ns, 0);
}

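/* Arm the request replay timer to fire after the given number of
 * microseconds.
 */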
void
namespace_queue_schedule(struct namespace *ns, unsigned int usec)
{
	struct timeval		 tv;

	tv.tv_sec = 0;
	tv.tv_usec = usec;
	evtimer_add(&ns->ev_queue, &tv);
}

/* Cancel all queued requests from the given connection. Drops matching
 * requests from all namespaces without sending a response.
 */
void
namespace_cancel_conn(struct conn *conn)
{
	struct namespace	*ns;
	struct request		*req, *next;

	TAILQ_FOREACH(ns, &conf->namespaces, next) {
		for (req = TAILQ_FIRST(&ns->request_queue); req != NULL;
		    req = next) {
			next = TAILQ_NEXT(req, next);

			if (req->conn == conn) {
				TAILQ_REMOVE(&ns->request_queue, req, next);
				request_free(req);
			}
		}
	}
}

int
namespace_conn_queue_count(struct conn *conn)
{
	struct namespace	*ns;
	struct request		*req;
	int			 count = 0;

	TAILQ_FOREACH(ns, &conf->namespaces, next) {
		TAILQ_FOREACH(req, &ns->request_queue, next) {
			if (req->conn == conn)
				count++;
		}
	}

	return count;
}