1 /**
2 * collectd - src/write_kafka.c
3 * Copyright (C) 2014 Pierre-Yves Ritschard
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Pierre-Yves Ritschard <pyr at spootnik.org>
25 */
26
27 #include "collectd.h"
28
29 #include "plugin.h"
30 #include "utils/cmds/putval.h"
31 #include "utils/common/common.h"
32 #include "utils/format_graphite/format_graphite.h"
33 #include "utils/format_json/format_json.h"
34 #include "utils_random.h"
35
36 #include <errno.h>
37 #include <librdkafka/rdkafka.h>
38 #include <stdint.h>
39
/* Per-<Topic> state: formatting options plus the (lazily created)
 * librdkafka producer handle and topic. One context is allocated per
 * configured topic and handed to the write callback as user data. */
struct kafka_topic_context {
#define KAFKA_FORMAT_JSON 0
#define KAFKA_FORMAT_COMMAND 1
#define KAFKA_FORMAT_GRAPHITE 2
  uint8_t format;              /* one of the KAFKA_FORMAT_* constants above */
  unsigned int graphite_flags; /* GRAPHITE_* bits passed to format_graphite() */
  bool store_rates;            /* convert counters to rates for JSON output */
  rd_kafka_topic_conf_t *conf; /* topic config; consumed by kafka_handle() */
  rd_kafka_topic_t *topic;     /* created lazily in kafka_handle() */
  rd_kafka_conf_t *kafka_conf; /* producer config; consumed by kafka_handle() */
  rd_kafka_t *kafka;           /* producer handle; created lazily */
  char *key;                   /* fixed partitioning key, or NULL for random */
  char *prefix;                /* Graphite metric prefix (may be NULL) */
  char *postfix;               /* Graphite metric postfix (may be NULL) */
  char escape_char;            /* Graphite escape character, default '.' */
  char *topic_name;            /* kafka topic to produce to */
  pthread_mutex_t lock;        /* serializes lazy initialization */
};
58
59 static int kafka_handle(struct kafka_topic_context *);
60 static int kafka_write(const data_set_t *, const value_list_t *, user_data_t *);
61 static int32_t kafka_partition(const rd_kafka_topic_t *, const void *, size_t,
62 int32_t, void *, void *);
63
64 /* Version 0.9.0 of librdkafka deprecates rd_kafka_set_logger() in favor of
65 * rd_kafka_conf_set_log_cb(). This is to make sure we're not using the
66 * deprecated function. */
67 #ifdef HAVE_LIBRDKAFKA_LOG_CB
68 #undef HAVE_LIBRDKAFKA_LOGGER
69 #endif
70
#if defined(HAVE_LIBRDKAFKA_LOGGER) || defined(HAVE_LIBRDKAFKA_LOG_CB)
static void kafka_log(const rd_kafka_t *, int, const char *, const char *);

/* Forward librdkafka log messages to collectd's logging facility.
 * `rkt` and `fac` are required by the callback signature but unused;
 * `level` is passed straight through (librdkafka uses syslog-style
 * severity levels — NOTE(review): relied upon, confirm against the
 * librdkafka version in use). */
static void kafka_log(const rd_kafka_t *rkt, int level, const char *fac,
                      const char *msg) {
  plugin_log(level, "%s", msg);
}
#endif
79
/* Return the most recent librdkafka error for the calling thread.
 * librdkafka >= 0.11.0 provides rd_kafka_last_error(); older versions
 * derive the error code from errno via rd_kafka_errno2err(). */
static rd_kafka_resp_err_t kafka_error() {
#if RD_KAFKA_VERSION >= 0x000b00ff
  return rd_kafka_last_error();
#else
  return rd_kafka_errno2err(errno);
#endif
}
87
/* djb2-style hash (h = h * 33 + byte), consuming the key back to
 * front. Used by kafka_partition() to spread keys over partitions. */
static uint32_t kafka_hash(const char *keydata, size_t keylen) {
  uint32_t h = 5381;

  while (keylen-- > 0)
    h = (h * 33u) + (uint32_t)keydata[keylen];

  return h;
}
94
/* 31 bit -> 4 byte -> 8 byte hex string + null byte */
#define KAFKA_RANDOM_KEY_SIZE 9
/* Compound literal: a fresh zero-initialized stack buffer at each use
 * site, sized for kafka_random_key(). */
#define KAFKA_RANDOM_KEY_BUFFER \
  (char[KAFKA_RANDOM_KEY_SIZE]) { "" }
/* Format a random value from cdrand_u() as an 8-digit uppercase hex
 * string into `buffer` and return `buffer`. Used as a per-message
 * partitioning key when no fixed key is configured. */
static char *kafka_random_key(char buffer[static KAFKA_RANDOM_KEY_SIZE]) {
  ssnprintf(buffer, KAFKA_RANDOM_KEY_SIZE, "%08" PRIX32, cdrand_u());
  return buffer;
}
103
/* Partitioner callback: hash the key onto a starting partition, then
 * probe forward (wrapping) for an available partition, giving up after
 * partition_cnt - 1 probes and returning the last candidate.
 * `p` and `m` are the opaque pointers from the librdkafka callback
 * signature and are unused here. */
static int32_t kafka_partition(const rd_kafka_topic_t *rkt, const void *keydata,
                               size_t keylen, int32_t partition_cnt, void *p,
                               void *m) {
  uint32_t target = kafka_hash(keydata, keylen) % partition_cnt;

  for (int32_t remaining = partition_cnt - 1;
       remaining > 0 && !rd_kafka_topic_partition_available(rkt, target);
       remaining--) {
    target = (target + 1) % partition_cnt;
  }

  return target;
}
116
kafka_handle(struct kafka_topic_context * ctx)117 static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
118 {
119 char errbuf[1024];
120 rd_kafka_conf_t *conf;
121 rd_kafka_topic_conf_t *topic_conf;
122
123 if (ctx->kafka != NULL && ctx->topic != NULL)
124 return 0;
125
126 if (ctx->kafka == NULL) {
127 if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
128 ERROR("write_kafka plugin: cannot duplicate kafka config");
129 return 1;
130 }
131
132 if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errbuf,
133 sizeof(errbuf))) == NULL) {
134 ERROR("write_kafka plugin: cannot create kafka handle.");
135 return 1;
136 }
137
138 rd_kafka_conf_destroy(ctx->kafka_conf);
139 ctx->kafka_conf = NULL;
140
141 INFO("write_kafka plugin: created KAFKA handle : %s",
142 rd_kafka_name(ctx->kafka));
143
144 #if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
145 rd_kafka_set_logger(ctx->kafka, kafka_log);
146 #endif
147 }
148
149 if (ctx->topic == NULL) {
150 if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
151 ERROR("write_kafka plugin: cannot duplicate kafka topic config");
152 return 1;
153 }
154
155 if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
156 topic_conf)) == NULL) {
157 ERROR("write_kafka plugin: cannot create topic : %s\n",
158 rd_kafka_err2str(kafka_error()));
159 return errno;
160 }
161
162 rd_kafka_topic_conf_destroy(ctx->conf);
163 ctx->conf = NULL;
164
165 INFO("write_kafka plugin: handle created for topic : %s",
166 rd_kafka_topic_name(ctx->topic));
167 }
168
169 return 0;
170
171 } /* }}} int kafka_handle */
172
/* Write callback registered per topic: render the value list in the
 * configured format and produce the message to kafka. Returns 0 on
 * success, EINVAL on bad arguments, non-zero on failure. */
static int kafka_write(const data_set_t *ds, /* {{{ */
                       const value_list_t *vl, user_data_t *ud) {
  int status = 0;
  void *key;
  size_t keylen = 0;
  char buffer[8192];
  size_t bfree = sizeof(buffer);
  size_t bfill = 0;
  size_t blen = 0;
  struct kafka_topic_context *ctx = ud->data;

  if ((ds == NULL) || (vl == NULL) || (ctx == NULL))
    return EINVAL;

  /* Producer handle and topic are created lazily; serialize creation. */
  pthread_mutex_lock(&ctx->lock);
  status = kafka_handle(ctx);
  pthread_mutex_unlock(&ctx->lock);
  if (status != 0)
    return status;

  /* Fix: bzero() was removed in POSIX.1-2008; memset() is the portable
   * equivalent. */
  memset(buffer, 0, sizeof(buffer));

  switch (ctx->format) {
  case KAFKA_FORMAT_COMMAND:
    status = cmd_create_putval(buffer, sizeof(buffer), ds, vl);
    if (status != 0) {
      ERROR("write_kafka plugin: cmd_create_putval failed with status %i.",
            status);
      return status;
    }
    blen = strlen(buffer);
    break;
  case KAFKA_FORMAT_JSON:
    format_json_initialize(buffer, &bfill, &bfree);
    format_json_value_list(buffer, &bfill, &bfree, ds, vl, ctx->store_rates);
    format_json_finalize(buffer, &bfill, &bfree);
    blen = strlen(buffer);
    break;
  case KAFKA_FORMAT_GRAPHITE:
    status =
        format_graphite(buffer, sizeof(buffer), ds, vl, ctx->prefix,
                        ctx->postfix, ctx->escape_char, ctx->graphite_flags);
    if (status != 0) {
      ERROR("write_kafka plugin: format_graphite failed with status %i.",
            status);
      return status;
    }
    blen = strlen(buffer);
    break;
  default:
    ERROR("write_kafka plugin: invalid format %i.", ctx->format);
    return -1;
  }

  /* Use the configured key, or a fresh random key so messages are
   * spread across partitions. */
  key =
      (ctx->key != NULL) ? ctx->key : kafka_random_key(KAFKA_RANDOM_KEY_BUFFER);
  keylen = strlen(key);

  /* Fix: rd_kafka_produce() returns -1 on failure; the old code
   * silently discarded the result, dropping values without a trace. */
  if (rd_kafka_produce(ctx->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                       buffer, blen, key, keylen, NULL) == -1) {
    ERROR("write_kafka plugin: failed to produce message : %s",
          rd_kafka_err2str(kafka_error()));
    return -1;
  }

  return status;
} /* }}} int kafka_write */
236
kafka_topic_context_free(void * p)237 static void kafka_topic_context_free(void *p) /* {{{ */
238 {
239 struct kafka_topic_context *ctx = p;
240
241 if (ctx == NULL)
242 return;
243
244 if (ctx->topic_name != NULL)
245 sfree(ctx->topic_name);
246 if (ctx->topic != NULL)
247 rd_kafka_topic_destroy(ctx->topic);
248 if (ctx->conf != NULL)
249 rd_kafka_topic_conf_destroy(ctx->conf);
250 if (ctx->kafka_conf != NULL)
251 rd_kafka_conf_destroy(ctx->kafka_conf);
252 if (ctx->kafka != NULL)
253 rd_kafka_destroy(ctx->kafka);
254
255 sfree(ctx);
256 } /* }}} void kafka_topic_context_free */
257
/* Configure one <Topic> block: build a topic context from a duplicate
 * of the global kafka configuration plus the block's options, then
 * register a write callback for it. On error the partially built
 * context is destroyed and nothing is registered. */
static void kafka_config_topic(rd_kafka_conf_t *conf,
                               oconfig_item_t *ci) /* {{{ */
{
  int status;
  struct kafka_topic_context *tctx;
  char *key = NULL;
  char *val;
  char callback_name[DATA_MAX_NAME_LEN];
  char errbuf[1024];
  oconfig_item_t *child;
  rd_kafka_conf_res_t ret;

  if ((tctx = calloc(1, sizeof(*tctx))) == NULL) {
    ERROR("write_kafka plugin: calloc failed.");
    return;
  }

  /* Defaults: JSON payloads, rates instead of raw counter values. */
  tctx->escape_char = '.';
  tctx->store_rates = true;
  tctx->format = KAFKA_FORMAT_JSON;
  tctx->key = NULL;

  if ((tctx->kafka_conf = rd_kafka_conf_dup(conf)) == NULL) {
    sfree(tctx);
    ERROR("write_kafka plugin: cannot allocate memory for kafka config");
    return;
  }

#ifdef HAVE_LIBRDKAFKA_LOG_CB
  rd_kafka_conf_set_log_cb(tctx->kafka_conf, kafka_log);
#endif

  if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
    rd_kafka_conf_destroy(tctx->kafka_conf);
    sfree(tctx);
    ERROR("write_kafka plugin: cannot create topic configuration.");
    return;
  }

  if (ci->values_num != 1) {
    WARNING("kafka topic name needed.");
    goto errout;
  }

  if (ci->values[0].type != OCONFIG_TYPE_STRING) {
    WARNING("kafka topic needs a string argument.");
    goto errout;
  }

  if ((tctx->topic_name = strdup(ci->values[0].value.string)) == NULL) {
    ERROR("write_kafka plugin: cannot copy topic name.");
    goto errout;
  }

  for (int i = 0; i < ci->children_num; i++) {
    /*
     * The code here could be simplified but makes room
     * for easy adding of new options later on.
     */
    child = &ci->children[i];
    status = 0;

    if (strcasecmp("Property", child->key) == 0) {
      if (child->values_num != 2) {
        WARNING("kafka properties need both a key and a value.");
        goto errout;
      }
      if (child->values[0].type != OCONFIG_TYPE_STRING ||
          child->values[1].type != OCONFIG_TYPE_STRING) {
        WARNING("kafka properties needs string arguments.");
        goto errout;
      }
      key = child->values[0].value.string;
      val = child->values[1].value.string;
      ret =
          rd_kafka_topic_conf_set(tctx->conf, key, val, errbuf, sizeof(errbuf));
      if (ret != RD_KAFKA_CONF_OK) {
        WARNING("cannot set kafka topic property %s to %s: %s.", key, val,
                errbuf);
        goto errout;
      }

    } else if (strcasecmp("Key", child->key) == 0) {
      if (cf_util_get_string(child, &tctx->key) != 0)
        continue;
      /* "Random" picks one random key at config time, reused for every
       * message produced to this topic. */
      if (strcasecmp("Random", tctx->key) == 0) {
        sfree(tctx->key);
        tctx->key = strdup(kafka_random_key(KAFKA_RANDOM_KEY_BUFFER));
      }
    } else if (strcasecmp("Format", child->key) == 0) {
      status = cf_util_get_string(child, &key);
      if (status != 0)
        goto errout;

      assert(key != NULL);

      if (strcasecmp(key, "Command") == 0) {
        tctx->format = KAFKA_FORMAT_COMMAND;

      } else if (strcasecmp(key, "Graphite") == 0) {
        tctx->format = KAFKA_FORMAT_GRAPHITE;

      } else if (strcasecmp(key, "Json") == 0) {
        tctx->format = KAFKA_FORMAT_JSON;

      } else {
        WARNING("write_kafka plugin: Invalid format string: %s", key);
      }

      sfree(key);

    } else if (strcasecmp("StoreRates", child->key) == 0) {
      status = cf_util_get_boolean(child, &tctx->store_rates);
      /* Also mirrored into the graphite flags; its result is
       * deliberately ignored. */
      (void)cf_util_get_flag(child, &tctx->graphite_flags,
                             GRAPHITE_STORE_RATES);

    } else if (strcasecmp("GraphiteSeparateInstances", child->key) == 0) {
      status = cf_util_get_flag(child, &tctx->graphite_flags,
                                GRAPHITE_SEPARATE_INSTANCES);

    } else if (strcasecmp("GraphiteAlwaysAppendDS", child->key) == 0) {
      status = cf_util_get_flag(child, &tctx->graphite_flags,
                                GRAPHITE_ALWAYS_APPEND_DS);

    } else if (strcasecmp("GraphitePreserveSeparator", child->key) == 0) {
      status = cf_util_get_flag(child, &tctx->graphite_flags,
                                GRAPHITE_PRESERVE_SEPARATOR);

    } else if (strcasecmp("GraphiteUseTags", child->key) == 0) {
      status =
          cf_util_get_flag(child, &tctx->graphite_flags, GRAPHITE_USE_TAGS);

    } else if (strcasecmp("GraphitePrefix", child->key) == 0) {
      status = cf_util_get_string(child, &tctx->prefix);
    } else if (strcasecmp("GraphitePostfix", child->key) == 0) {
      status = cf_util_get_string(child, &tctx->postfix);
    } else if (strcasecmp("GraphiteEscapeChar", child->key) == 0) {
      char *tmp_buff = NULL;
      status = cf_util_get_string(child, &tmp_buff);
      /* Fix: only dereference tmp_buff when parsing succeeded; the old
       * code called strlen(NULL) when cf_util_get_string() failed. */
      if (status == 0) {
        if (strlen(tmp_buff) > 1)
          WARNING(
              "write_kafka plugin: The option \"GraphiteEscapeChar\" handles "
              "only one character. Others will be ignored.");
        tctx->escape_char = tmp_buff[0];
      }
      sfree(tmp_buff);
    } else {
      WARNING("write_kafka plugin: Invalid directive: %s.", child->key);
    }

    /* NOTE(review): a failed option stops parsing but registration
     * still proceeds below — preserved from the original behavior. */
    if (status != 0)
      break;
  }

  rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
  rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);

  ssnprintf(callback_name, sizeof(callback_name), "write_kafka/%s",
            tctx->topic_name);

  /* Ownership of tctx passes to the plugin infrastructure on success;
   * kafka_topic_context_free() releases it at shutdown. */
  status = plugin_register_write(callback_name, kafka_write,
                                 &(user_data_t){
                                     .data = tctx,
                                     .free_func = kafka_topic_context_free,
                                 });
  if (status != 0) {
    WARNING("write_kafka plugin: plugin_register_write (\"%s\") "
            "failed with status %i.",
            callback_name, status);
    goto errout;
  }

  pthread_mutex_init(&tctx->lock, /* attr = */ NULL);

  return;
errout:
  /* Fix: also release key/prefix/postfix, which may already have been
   * allocated when an error occurs (previously leaked). */
  sfree(tctx->topic_name);
  sfree(tctx->key);
  sfree(tctx->prefix);
  sfree(tctx->postfix);
  if (tctx->conf != NULL)
    rd_kafka_topic_conf_destroy(tctx->conf);
  if (tctx->kafka_conf != NULL)
    rd_kafka_conf_destroy(tctx->kafka_conf);
  sfree(tctx);
} /* }}} int kafka_config_topic */
440
/* Top-level <Plugin write_kafka> handler: accumulates global kafka
 * properties into one configuration object and hands a duplicate to
 * each <Topic> block. Returns 0 on success, -1 on error. */
static int kafka_config(oconfig_item_t *ci) /* {{{ */
{
  char errbuf[1024];
  rd_kafka_conf_t *conf = rd_kafka_conf_new();

  if (conf == NULL) {
    WARNING("cannot allocate kafka configuration.");
    return -1;
  }

  for (int i = 0; i < ci->children_num; i++) {
    oconfig_item_t *child = &ci->children[i];

    if (strcasecmp("Topic", child->key) == 0) {
      kafka_config_topic(conf, child);
      continue;
    }

    if (strcasecmp(child->key, "Property") != 0) {
      WARNING("write_kafka plugin: Ignoring unknown "
              "configuration option \"%s\" at top level.",
              child->key);
      continue;
    }

    /* Property <key> <value>: both arguments must be strings. */
    if (child->values_num != 2) {
      WARNING("kafka properties need both a key and a value.");
      goto errout;
    }
    if (child->values[0].type != OCONFIG_TYPE_STRING ||
        child->values[1].type != OCONFIG_TYPE_STRING) {
      WARNING("kafka properties needs string arguments.");
      goto errout;
    }

    char *prop_key = strdup(child->values[0].value.string);
    if (prop_key == NULL) {
      WARNING("cannot allocate memory for attribute key.");
      goto errout;
    }
    char *prop_val = strdup(child->values[1].value.string);
    if (prop_val == NULL) {
      WARNING("cannot allocate memory for attribute value.");
      sfree(prop_key);
      goto errout;
    }

    rd_kafka_conf_res_t res =
        rd_kafka_conf_set(conf, prop_key, prop_val, errbuf, sizeof(errbuf));
    if (res != RD_KAFKA_CONF_OK)
      WARNING("cannot set kafka property %s to %s: %s", prop_key, prop_val,
              errbuf);
    sfree(prop_key);
    sfree(prop_val);
    if (res != RD_KAFKA_CONF_OK)
      goto errout;
  }

  rd_kafka_conf_destroy(conf);
  return 0;

errout:
  rd_kafka_conf_destroy(conf);
  return -1;
} /* }}} int kafka_config */
502
/* collectd plugin entry point: register the complex-config callback;
 * the per-topic write callbacks are registered from kafka_config(). */
void module_register(void) {
  plugin_register_complex_config("write_kafka", kafka_config);
}
506