/*
    pmacct (Promiscuous mode IP Accounting package)
    pmacct is Copyright (C) 2003-2019 by Paolo Lucente
*/

/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

/* includes */
#include "pmacct.h"
#include "imt_plugin.h"
#include "ip_flow.h"
#include "classifier.h"
#include "bgp/bgp_packet.h"
#include "bgp/bgp.h"

/* functions */
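/*
   build_query_server() creates the AF_UNIX stream socket on which the IMT
   plugin serves memory-table queries (typically issued by the pmacct client
   tool): it binds to the given filesystem path, opens up the file
   permissions, sets the socket non-blocking and puts it in listening state.
*/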
int build_query_server(char *path_ptr)
{
  struct sockaddr_un sAddr;
  int sd, rc;

  sd = socket(AF_UNIX, SOCK_STREAM, 0);
  if (sd < 0) {
    Log(LOG_ERR, "ERROR ( %s/%s ): cannot open socket.\n", config.name, config.type);
    exit_gracefully(1);
  }

  sAddr.sun_family = AF_UNIX;
  strlcpy(sAddr.sun_path, path_ptr, sizeof(sAddr.sun_path));
  unlink(path_ptr);

  rc = bind(sd, (struct sockaddr *) &sAddr, sizeof(sAddr));
  if (rc < 0) {
    Log(LOG_ERR, "ERROR ( %s/%s ): cannot bind to file %s.\n", config.name, config.type, path_ptr);
    exit_gracefully(1);
  }

  chmod(path_ptr, S_IRUSR|S_IWUSR|S_IXUSR|
                  S_IRGRP|S_IWGRP|S_IXGRP|
                  S_IROTH|S_IWOTH|S_IXOTH);

  setnonblocking(sd);
  listen(sd, 1);
  Log(LOG_INFO, "OK ( %s/%s ): waiting for data on: '%s'\n", config.name, config.type, path_ptr);

  return sd;
}


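/*
   process_query_data() parses one client request, already read into 'buf',
   and streams the reply back on 'sd'. The exchange implemented below is:
   the client sends a struct query_header (type, num, passwd, ...) followed,
   for WANT_MATCH / WANT_COUNTER queries, by 'num' struct query_entry
   records; the server replies with a query_header copy plus the requested
   records and finally a LARGEBUFLEN block of zeroes as end-of-data marker.
*/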
void process_query_data(int sd, unsigned char *buf, int len, struct extra_primitives *extras, int datasize, int forked)
{
  struct acc *acc_elem = 0;
  struct bucket_desc bd;
  struct query_header *q, *uq;
  struct query_entry request;
  struct reply_buffer rb;
  unsigned char *elem, *bufptr;
  int following_chain = 0;
  unsigned int idx;
  struct pkt_data dummy;
  struct pkt_bgp_primitives dummy_pbgp;
  struct pkt_legacy_bgp_primitives dummy_plbgp;
  struct pkt_nat_primitives dummy_pnat;
  struct pkt_mpls_primitives dummy_pmpls;
  struct pkt_tunnel_primitives dummy_ptun;
  char *dummy_pcust = NULL, *custbuf = NULL;
  struct pkt_vlen_hdr_primitives dummy_pvlen;
  char emptybuf[LARGEBUFLEN];
  int reset_counter, offset = PdataSz;

  dummy_pcust = malloc(config.cpptrs.len);
  custbuf = malloc(config.cpptrs.len);
  if (!dummy_pcust || !custbuf) {
    Log(LOG_ERR, "ERROR ( %s/%s ): Unable to malloc() dummy custom primitives buffers. Exiting.\n", config.name, config.type);
    exit_gracefully(1);
  }

  memset(&dummy, 0, sizeof(struct pkt_data));
  memset(&dummy_pbgp, 0, sizeof(struct pkt_bgp_primitives));
  memset(&dummy_plbgp, 0, sizeof(struct pkt_legacy_bgp_primitives));
  memset(&dummy_pnat, 0, sizeof(struct pkt_nat_primitives));
  memset(&dummy_pmpls, 0, sizeof(struct pkt_mpls_primitives));
  memset(&dummy_ptun, 0, sizeof(struct pkt_tunnel_primitives));
  memset(dummy_pcust, 0, config.cpptrs.len);
  memset(custbuf, 0, config.cpptrs.len);
  memset(&dummy_pvlen, 0, sizeof(struct pkt_vlen_hdr_primitives));

  memset(emptybuf, 0, LARGEBUFLEN);
  memset(&rb, 0, sizeof(struct reply_buffer));
  memcpy(rb.buf, buf, sizeof(struct query_header));
  rb.len = LARGEBUFLEN-sizeof(struct query_header);
  rb.packed = sizeof(struct query_header);

  /* arranging some pointers */
  uq = (struct query_header *) buf;
  q = (struct query_header *) rb.buf;
  rb.ptr = rb.buf+sizeof(struct query_header);
  bufptr = buf+sizeof(struct query_header);
  q->ip_sz = sizeof(acc_elem->primitives.src_ip);
  q->cnt_sz = sizeof(acc_elem->bytes_counter);
  q->datasize = datasize;

  if (extras->off_pkt_bgp_primitives) {
    q->extras.off_pkt_bgp_primitives = offset;
    offset += sizeof(struct pkt_bgp_primitives);
  }
  else q->extras.off_pkt_bgp_primitives = 0;
  if (extras->off_pkt_lbgp_primitives) {
    q->extras.off_pkt_lbgp_primitives = offset;
    offset += sizeof(struct pkt_legacy_bgp_primitives);
  }
  else q->extras.off_pkt_lbgp_primitives = 0;
  if (extras->off_pkt_nat_primitives) {
    q->extras.off_pkt_nat_primitives = offset;
    offset += sizeof(struct pkt_nat_primitives);
  }
  else q->extras.off_pkt_nat_primitives = 0;
  if (extras->off_pkt_mpls_primitives) {
    q->extras.off_pkt_mpls_primitives = offset;
    offset += sizeof(struct pkt_mpls_primitives);
  }
  else q->extras.off_pkt_mpls_primitives = 0;
  if (extras->off_pkt_tun_primitives) {
    q->extras.off_pkt_tun_primitives = offset;
    offset += sizeof(struct pkt_tunnel_primitives);
  }
  else q->extras.off_pkt_tun_primitives = 0;
  if (extras->off_custom_primitives) {
    q->extras.off_custom_primitives = offset;
    offset += config.cpptrs.len;
  }
  else q->extras.off_custom_primitives = 0;
  if (extras->off_pkt_vlen_hdr_primitives) {
    q->extras.off_pkt_vlen_hdr_primitives = offset;
    offset += sizeof(struct pkt_vlen_hdr_primitives);
    /* XXX: handle variable length part of this structure */
  }
  else q->extras.off_pkt_vlen_hdr_primitives = 0;

  Log(LOG_DEBUG, "DEBUG ( %s/%s ): Processing data received from client ...\n", config.name, config.type);

  if (config.imt_plugin_passwd) {
    if (strncmp(config.imt_plugin_passwd, q->passwd, MIN(strlen(config.imt_plugin_passwd), 8))) {
      free(dummy_pcust);
      free(custbuf);
      return;
    }
  }

  /* 'a' is the in-memory accounting table (see imt_plugin.h) */
  elem = (unsigned char *) a;

  reset_counter = q->type & WANT_RESET;

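  /*
     WANT_STATS: full table dump. Walk every bucket and its collision chain
     and enqueue each non-zero entry, followed by whatever extra primitive
     blocks (BGP, legacy BGP, NAT, MPLS, tunnel, custom, vlen) are enabled.
  */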
  if (q->type & WANT_STATS) {
    q->what_to_count = config.what_to_count;
    q->what_to_count_2 = config.what_to_count_2;
    for (idx = 0; idx < config.buckets; idx++) {
      if (!following_chain) acc_elem = (struct acc *) elem;
      if (!test_zero_elem(acc_elem)) {
        enQueue_elem(sd, &rb, acc_elem, PdataSz, datasize);

        if (extras->off_pkt_bgp_primitives && acc_elem->pbgp) {
          enQueue_elem(sd, &rb, acc_elem->pbgp, PbgpSz, datasize - extras->off_pkt_bgp_primitives);
        }

        if (extras->off_pkt_lbgp_primitives) {
          if (acc_elem->clbgp) {
            struct pkt_legacy_bgp_primitives tmp_plbgp;

            cache_to_pkt_legacy_bgp_primitives(&tmp_plbgp, acc_elem->clbgp);
            enQueue_elem(sd, &rb, &tmp_plbgp, PlbgpSz, datasize - extras->off_pkt_lbgp_primitives);
          }
        }

        if (extras->off_pkt_nat_primitives && acc_elem->pnat) {
          enQueue_elem(sd, &rb, acc_elem->pnat, PnatSz, datasize - extras->off_pkt_nat_primitives);
        }

        if (extras->off_pkt_mpls_primitives && acc_elem->pmpls) {
          enQueue_elem(sd, &rb, acc_elem->pmpls, PmplsSz, datasize - extras->off_pkt_mpls_primitives);
        }

        if (extras->off_pkt_tun_primitives && acc_elem->ptun) {
          enQueue_elem(sd, &rb, acc_elem->ptun, PtunSz, datasize - extras->off_pkt_tun_primitives);
        }

        if (extras->off_custom_primitives && acc_elem->pcust) {
          enQueue_elem(sd, &rb, acc_elem->pcust, config.cpptrs.len, datasize - extras->off_custom_primitives);
        }

        if (extras->off_pkt_vlen_hdr_primitives && acc_elem->pvlen) {
          enQueue_elem(sd, &rb, acc_elem->pvlen, PvhdrSz + acc_elem->pvlen->tot_len, datasize - extras->off_pkt_vlen_hdr_primitives);
        }
      }
      if (acc_elem->next != NULL) {
        Log(LOG_DEBUG, "DEBUG ( %s/%s ): Following chain in reply ...\n", config.name, config.type);
        acc_elem = acc_elem->next;
        following_chain = TRUE;
        idx--;
      }
      else {
        elem += sizeof(struct acc);
        following_chain = FALSE;
      }
    }
    if (rb.packed) send(sd, rb.buf, rb.packed, 0); /* send remainder data */
  }
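  /*
     WANT_STATUS: per-bucket status. For each bucket report its index and
     how many non-zero entries sit in its collision chain.
  */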
  else if (q->type & WANT_STATUS) {
    for (idx = 0; idx < config.buckets; idx++) {

      /* Administrativia */
      following_chain = FALSE;
      bd.num = 0;
      bd.howmany = 0;
      acc_elem = (struct acc *) elem;

      do {
        if (following_chain) acc_elem = acc_elem->next;
        if (!test_zero_elem(acc_elem)) bd.howmany++;
        bd.num = idx; /* we need to avoid this redundancy */
        following_chain = TRUE;
      } while (acc_elem->next != NULL);

      enQueue_elem(sd, &rb, &bd, sizeof(struct bucket_desc), sizeof(struct bucket_desc));
      elem += sizeof(struct acc);
    }
    if (rb.packed) send(sd, rb.buf, rb.packed, 0);
  }
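  /*
     WANT_MATCH / WANT_COUNTER: lookups. If the request keys on exactly the
     configured set of primitives, a direct hash lookup is performed;
     otherwise the whole table is scanned, each entry is masked down to the
     requested fields via mask_elem() and compared against the request.
     WANT_COUNTER returns (or accumulates) counters only, and WANT_RESET
     clears the matched entries or flags them for reset.
  */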
  else if (q->type & WANT_MATCH || q->type & WANT_COUNTER) {
    unsigned int j;

    q->what_to_count = config.what_to_count;
    q->what_to_count_2 = config.what_to_count_2;
    for (j = 0; j < uq->num; j++, bufptr += sizeof(struct query_entry)) {
      memcpy(&request, bufptr, sizeof(struct query_entry));
      Log(LOG_DEBUG, "DEBUG ( %s/%s ): Searching into accounting structure ...\n", config.name, config.type);
      if (request.what_to_count == config.what_to_count && request.what_to_count_2 == config.what_to_count_2) {
        struct pkt_data pd_dummy;
        struct primitives_ptrs prim_ptrs;

        memset(&pd_dummy, 0, sizeof(pd_dummy));
        memset(&prim_ptrs, 0, sizeof(prim_ptrs));
        memcpy(&pd_dummy.primitives, &request.data, sizeof(struct pkt_primitives));
        prim_ptrs.data = &pd_dummy;
        prim_ptrs.pbgp = &request.pbgp;
        prim_ptrs.plbgp = &request.plbgp;
        prim_ptrs.pnat = &request.pnat;
        prim_ptrs.pmpls = &request.pmpls;
        prim_ptrs.ptun = &request.ptun;
        prim_ptrs.pcust = request.pcust;
        prim_ptrs.pvlen = request.pvlen;

        acc_elem = search_accounting_structure(&prim_ptrs);
        if (acc_elem) {
          if (!test_zero_elem(acc_elem)) {
            enQueue_elem(sd, &rb, acc_elem, PdataSz, datasize);

            if (extras->off_pkt_bgp_primitives && acc_elem->pbgp) {
              enQueue_elem(sd, &rb, acc_elem->pbgp, PbgpSz, datasize - extras->off_pkt_bgp_primitives);
            }

            if (extras->off_pkt_lbgp_primitives) {
              if (acc_elem->clbgp) {
                struct pkt_legacy_bgp_primitives tmp_plbgp;

                cache_to_pkt_legacy_bgp_primitives(&tmp_plbgp, acc_elem->clbgp);
                enQueue_elem(sd, &rb, &tmp_plbgp, PlbgpSz, datasize - extras->off_pkt_lbgp_primitives);
              }
            }

            if (extras->off_pkt_nat_primitives && acc_elem->pnat) {
              enQueue_elem(sd, &rb, acc_elem->pnat, PnatSz, datasize - extras->off_pkt_nat_primitives);
            }

            if (extras->off_pkt_mpls_primitives && acc_elem->pmpls) {
              enQueue_elem(sd, &rb, acc_elem->pmpls, PmplsSz, datasize - extras->off_pkt_mpls_primitives);
            }

            if (extras->off_pkt_tun_primitives && acc_elem->ptun) {
              enQueue_elem(sd, &rb, acc_elem->ptun, PtunSz, datasize - extras->off_pkt_tun_primitives);
            }

            if (extras->off_custom_primitives && acc_elem->pcust) {
              enQueue_elem(sd, &rb, acc_elem->pcust, config.cpptrs.len, datasize - extras->off_custom_primitives);
            }

            if (extras->off_pkt_vlen_hdr_primitives && acc_elem->pvlen) {
              enQueue_elem(sd, &rb, acc_elem->pvlen, PvhdrSz + acc_elem->pvlen->tot_len, datasize - extras->off_pkt_vlen_hdr_primitives);
            }

            if (reset_counter) {
              if (forked) set_reset_flag(acc_elem);
              else reset_counters(acc_elem);
            }
          }
          else {
            if (q->type & WANT_COUNTER) {
              enQueue_elem(sd, &rb, &dummy, PdataSz, datasize);

              if (extras->off_pkt_bgp_primitives)
                enQueue_elem(sd, &rb, &dummy_pbgp, PbgpSz, datasize - extras->off_pkt_bgp_primitives);

              if (extras->off_pkt_lbgp_primitives)
                enQueue_elem(sd, &rb, &dummy_plbgp, PlbgpSz, datasize - extras->off_pkt_lbgp_primitives);

              if (extras->off_pkt_nat_primitives)
                enQueue_elem(sd, &rb, &dummy_pnat, PnatSz, datasize - extras->off_pkt_nat_primitives);

              if (extras->off_pkt_mpls_primitives)
                enQueue_elem(sd, &rb, &dummy_pmpls, PmplsSz, datasize - extras->off_pkt_mpls_primitives);

              if (extras->off_pkt_tun_primitives)
                enQueue_elem(sd, &rb, &dummy_ptun, PtunSz, datasize - extras->off_pkt_tun_primitives);

              if (extras->off_custom_primitives)
                enQueue_elem(sd, &rb, dummy_pcust, config.cpptrs.len, datasize - extras->off_custom_primitives);

              if (extras->off_pkt_vlen_hdr_primitives)
                enQueue_elem(sd, &rb, &dummy_pvlen, PvhdrSz, datasize - extras->off_pkt_vlen_hdr_primitives);
            }
          }
        }
        else {
          if (q->type & WANT_COUNTER) {
            enQueue_elem(sd, &rb, &dummy, PdataSz, datasize);

            if (extras->off_pkt_bgp_primitives)
              enQueue_elem(sd, &rb, &dummy_pbgp, PbgpSz, datasize - extras->off_pkt_bgp_primitives);

            if (extras->off_pkt_lbgp_primitives)
              enQueue_elem(sd, &rb, &dummy_plbgp, PlbgpSz, datasize - extras->off_pkt_lbgp_primitives);

            if (extras->off_pkt_nat_primitives)
              enQueue_elem(sd, &rb, &dummy_pnat, PnatSz, datasize - extras->off_pkt_nat_primitives);

            if (extras->off_pkt_mpls_primitives)
              enQueue_elem(sd, &rb, &dummy_pmpls, PmplsSz, datasize - extras->off_pkt_mpls_primitives);

            if (extras->off_pkt_tun_primitives)
              enQueue_elem(sd, &rb, &dummy_ptun, PtunSz, datasize - extras->off_pkt_tun_primitives);

            if (extras->off_custom_primitives)
              enQueue_elem(sd, &rb, dummy_pcust, config.cpptrs.len, datasize - extras->off_custom_primitives);

            if (extras->off_pkt_vlen_hdr_primitives)
              enQueue_elem(sd, &rb, &dummy_pvlen, PvhdrSz, datasize - extras->off_pkt_vlen_hdr_primitives);
          }
        }
      }
      else {
        struct pkt_primitives tbuf;
        struct pkt_bgp_primitives bbuf;
        struct pkt_legacy_bgp_primitives lbbuf;
        struct pkt_nat_primitives nbuf;
        struct pkt_mpls_primitives mbuf;
        struct pkt_tunnel_primitives ubuf;
        struct pkt_data abuf;

        following_chain = FALSE;
        elem = (unsigned char *) a;
        memset(&abuf, 0, sizeof(abuf));

        for (idx = 0; idx < config.buckets; idx++) {
          if (!following_chain) acc_elem = (struct acc *) elem;
          if (!test_zero_elem(acc_elem)) {
            /* XXX: support for custom and vlen primitives */
            mask_elem(&tbuf, &bbuf, &lbbuf, &nbuf, &mbuf, &ubuf, acc_elem, request.what_to_count, request.what_to_count_2, extras);
            if (!memcmp(&tbuf, &request.data, sizeof(struct pkt_primitives)) &&
                !memcmp(&bbuf, &request.pbgp, sizeof(struct pkt_bgp_primitives)) &&
                !memcmp(&lbbuf, &request.plbgp, sizeof(struct pkt_legacy_bgp_primitives)) &&
                !memcmp(&nbuf, &request.pnat, sizeof(struct pkt_nat_primitives)) &&
                !memcmp(&mbuf, &request.pmpls, sizeof(struct pkt_mpls_primitives)) &&
                !memcmp(&ubuf, &request.ptun, sizeof(struct pkt_tunnel_primitives))) {
              if (q->type & WANT_COUNTER) Accumulate_Counters(&abuf, acc_elem);
              else {
                enQueue_elem(sd, &rb, acc_elem, PdataSz, datasize); /* q->type == WANT_MATCH */

                if (extras->off_pkt_bgp_primitives && acc_elem->pbgp) {
                  enQueue_elem(sd, &rb, acc_elem->pbgp, PbgpSz, datasize - extras->off_pkt_bgp_primitives);
                }

                if (extras->off_pkt_lbgp_primitives) {
                  if (acc_elem->clbgp) {
                    struct pkt_legacy_bgp_primitives tmp_plbgp;

                    cache_to_pkt_legacy_bgp_primitives(&tmp_plbgp, acc_elem->clbgp);
                    enQueue_elem(sd, &rb, &tmp_plbgp, PlbgpSz, datasize - extras->off_pkt_lbgp_primitives);
                  }
                }

                if (extras->off_pkt_nat_primitives && acc_elem->pnat) {
                  enQueue_elem(sd, &rb, acc_elem->pnat, PnatSz, datasize - extras->off_pkt_nat_primitives);
                }
                if (extras->off_pkt_mpls_primitives && acc_elem->pmpls) {
                  enQueue_elem(sd, &rb, acc_elem->pmpls, PmplsSz, datasize - extras->off_pkt_mpls_primitives);
                }
                if (extras->off_pkt_tun_primitives && acc_elem->ptun) {
                  enQueue_elem(sd, &rb, acc_elem->ptun, PtunSz, datasize - extras->off_pkt_tun_primitives);
                }
                if (extras->off_custom_primitives && acc_elem->pcust) {
                  enQueue_elem(sd, &rb, acc_elem->pcust, config.cpptrs.len, datasize - extras->off_custom_primitives);
                }
                if (extras->off_pkt_vlen_hdr_primitives && acc_elem->pvlen) {
                  enQueue_elem(sd, &rb, acc_elem->pvlen, PvhdrSz + acc_elem->pvlen->tot_len, datasize - extras->off_pkt_vlen_hdr_primitives);
                }
              }
              if (reset_counter) set_reset_flag(acc_elem);
            }
          }
          if (acc_elem->next) {
            acc_elem = acc_elem->next;
            following_chain = TRUE;
            idx--;
          }
          else {
            elem += sizeof(struct acc);
            following_chain = FALSE;
          }
        }
        if (q->type & WANT_COUNTER) enQueue_elem(sd, &rb, &abuf, PdataSz, PdataSz); /* enqueue accumulated data */
      }
    }
    if (rb.packed) send(sd, rb.buf, rb.packed, 0); /* send remainder data */
  }
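  /*
     WANT_CLASS_TABLE: classification table dump. Stream the stripped-down
     classifier entries and terminate with an all-zero element.
  */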
  else if (q->type & WANT_CLASS_TABLE) {
    struct stripped_class dummy;
    u_int32_t idx = 0, max = 0;

    /* XXX: we should try using pmct_get_max_entries() */
    max = q->num = config.classifier_table_num;
    if (!q->num && class) max = q->num = MAX_CLASSIFIERS;

    while (idx < max) {
      enQueue_elem(sd, &rb, &class[idx], sizeof(struct stripped_class), sizeof(struct stripped_class));
      idx++;
    }

    memset(&dummy, 0, sizeof(dummy));
    enQueue_elem(sd, &rb, &dummy, sizeof(dummy), sizeof(dummy));
    if (rb.packed) send(sd, rb.buf, rb.packed, 0); /* send remainder data */
  }
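  /*
     WANT_CUSTOM_PRIMITIVES_TABLE: repackage the locally configured custom
     primitives registry into an imt_custom_primitives structure and send it
     as a single element.
  */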
  else if (q->type & WANT_CUSTOM_PRIMITIVES_TABLE) {
    struct imt_custom_primitives custom_primitives_registry;
    u_int32_t idx;

    /* composing the new structure */
    memset(&custom_primitives_registry, 0, sizeof(custom_primitives_registry));
    for (idx = 0; idx < config.cpptrs.num; idx++) {
      strlcpy(custom_primitives_registry.primitive[idx].name, config.cpptrs.primitive[idx].name, MAX_CUSTOM_PRIMITIVE_NAMELEN);
      custom_primitives_registry.primitive[idx].off = config.cpptrs.primitive[idx].off;
      custom_primitives_registry.primitive[idx].field_type = config.cpptrs.primitive[idx].ptr->field_type;
      custom_primitives_registry.primitive[idx].len = config.cpptrs.primitive[idx].ptr->len;
      custom_primitives_registry.primitive[idx].semantics = config.cpptrs.primitive[idx].ptr->semantics;
      custom_primitives_registry.primitive[idx].type = config.cpptrs.primitive[idx].ptr->type;
    }
    custom_primitives_registry.num = config.cpptrs.num;
    custom_primitives_registry.len = config.cpptrs.len;

    if (idx) enQueue_elem(sd, &rb, &custom_primitives_registry, sizeof(custom_primitives_registry), sizeof(custom_primitives_registry));
    else {
      memset(&dummy, 0, sizeof(dummy));
      enQueue_elem(sd, &rb, &dummy, sizeof(dummy), sizeof(dummy));
    }
    if (rb.packed) send(sd, rb.buf, rb.packed, 0); /* send remainder data */
  }
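  /* WANT_ERASE_LAST_TSTAMP: return the timestamp of the last table erase/reset */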
  else if (q->type & WANT_ERASE_LAST_TSTAMP) {
    enQueue_elem(sd, &rb, &table_reset_stamp, sizeof(table_reset_stamp), sizeof(table_reset_stamp));
    if (rb.packed) send(sd, rb.buf, rb.packed, 0); /* send remainder data */
  }

  /* wait a bit due to setnonblocking() then send EOF */
  usleep(1000);
  send(sd, emptybuf, LARGEBUFLEN, 0);

  if (dummy_pcust) free(dummy_pcust);
  if (custbuf) free(custbuf);
}

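/*
   mask_elem() copies into the supplied destination structures only the
   fields of 'src' selected by the what_to_count / what_to_count_2 bitmaps,
   so that the masked copy can be memcmp()'d against a partial-match request.
*/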
void mask_elem(struct pkt_primitives *d1, struct pkt_bgp_primitives *d2, struct pkt_legacy_bgp_primitives *d5,
               struct pkt_nat_primitives *d3, struct pkt_mpls_primitives *d4, struct pkt_tunnel_primitives *d6,
               struct acc *src, pm_cfgreg_t w, pm_cfgreg_t w2, struct extra_primitives *extras)
{
  struct pkt_primitives *s1 = &src->primitives;
  struct pkt_bgp_primitives *s2 = src->pbgp;
  struct pkt_legacy_bgp_primitives tmp_plbgp;
  struct pkt_legacy_bgp_primitives *s5 = &tmp_plbgp;
  struct pkt_nat_primitives *s3 = src->pnat;
  struct pkt_mpls_primitives *s4 = src->pmpls;
  struct pkt_tunnel_primitives *s6 = src->ptun;

  cache_to_pkt_legacy_bgp_primitives(s5, src->clbgp);

  memset(d1, 0, sizeof(struct pkt_primitives));
  memset(d2, 0, sizeof(struct pkt_bgp_primitives));
  memset(d5, 0, sizeof(struct pkt_legacy_bgp_primitives));
  memset(d3, 0, sizeof(struct pkt_nat_primitives));
  memset(d4, 0, sizeof(struct pkt_mpls_primitives));
  memset(d6, 0, sizeof(struct pkt_tunnel_primitives));

#if defined (HAVE_L2)
  if (w & COUNT_SRC_MAC) memcpy(d1->eth_shost, s1->eth_shost, ETH_ADDR_LEN);
  if (w & COUNT_DST_MAC) memcpy(d1->eth_dhost, s1->eth_dhost, ETH_ADDR_LEN);
  if (w & COUNT_VLAN) d1->vlan_id = s1->vlan_id;
  if (w & COUNT_COS) d1->cos = s1->cos;
  if (w & COUNT_ETHERTYPE) d1->etype = s1->etype;
#endif
  if (w & COUNT_SRC_HOST) memcpy(&d1->src_ip, &s1->src_ip, sizeof(d1->src_ip));
  if (w & COUNT_DST_HOST) memcpy(&d1->dst_ip, &s1->dst_ip, sizeof(d1->dst_ip));
  if (w & COUNT_SRC_NET) memcpy(&d1->src_net, &s1->src_net, sizeof(d1->src_net));
  if (w & COUNT_DST_NET) memcpy(&d1->dst_net, &s1->dst_net, sizeof(d1->dst_net));
  if (w & COUNT_SRC_NMASK) d1->src_nmask = s1->src_nmask;
  if (w & COUNT_DST_NMASK) d1->dst_nmask = s1->dst_nmask;
  if (w & COUNT_SRC_AS) d1->src_as = s1->src_as;
  if (w & COUNT_DST_AS) d1->dst_as = s1->dst_as;
  if (w & COUNT_SRC_PORT) d1->src_port = s1->src_port;
  if (w & COUNT_DST_PORT) d1->dst_port = s1->dst_port;
  if (w & COUNT_IP_TOS) d1->tos = s1->tos;
  if (w & COUNT_IP_PROTO) d1->proto = s1->proto;
  if (w & COUNT_IN_IFACE) d1->ifindex_in = s1->ifindex_in;
  if (w & COUNT_OUT_IFACE) d1->ifindex_out = s1->ifindex_out;
  if (w & COUNT_TAG) d1->tag = s1->tag;
  if (w & COUNT_TAG2) d1->tag2 = s1->tag2;
  if (w & COUNT_CLASS) d1->class = s1->class;
  if (w2 & COUNT_EXPORT_PROTO_SEQNO) memcpy(&d1->export_proto_seqno, &s1->export_proto_seqno, sizeof(d1->export_proto_seqno));
  if (w2 & COUNT_EXPORT_PROTO_VERSION) memcpy(&d1->export_proto_version, &s1->export_proto_version, sizeof(d1->export_proto_version));
  if (w2 & COUNT_EXPORT_PROTO_SYSID) memcpy(&d1->export_proto_sysid, &s1->export_proto_sysid, sizeof(d1->export_proto_sysid));

#if defined (WITH_GEOIP) || defined (WITH_GEOIPV2)
  if (w2 & COUNT_SRC_HOST_COUNTRY) memcpy(&d1->src_ip_country, &s1->src_ip_country, sizeof(d1->src_ip_country));
  if (w2 & COUNT_DST_HOST_COUNTRY) memcpy(&d1->dst_ip_country, &s1->dst_ip_country, sizeof(d1->dst_ip_country));
  if (w2 & COUNT_SRC_HOST_POCODE) memcpy(&d1->src_ip_pocode, &s1->src_ip_pocode, sizeof(d1->src_ip_pocode));
  if (w2 & COUNT_DST_HOST_POCODE) memcpy(&d1->dst_ip_pocode, &s1->dst_ip_pocode, sizeof(d1->dst_ip_pocode));
  if (w2 & COUNT_SRC_HOST_COORDS) {
    memcpy(&d1->src_ip_lat, &s1->src_ip_lat, sizeof(d1->src_ip_lat));
    memcpy(&d1->src_ip_lon, &s1->src_ip_lon, sizeof(d1->src_ip_lon));
  }
  if (w2 & COUNT_DST_HOST_COORDS) {
    memcpy(&d1->dst_ip_lat, &s1->dst_ip_lat, sizeof(d1->dst_ip_lat));
    memcpy(&d1->dst_ip_lon, &s1->dst_ip_lon, sizeof(d1->dst_ip_lon));
  }
#endif

#if defined (WITH_NDPI)
  if (w2 & COUNT_NDPI_CLASS) memcpy(&d1->ndpi_class, &s1->class, sizeof(d1->ndpi_class));
#endif

  if (w2 & COUNT_SAMPLING_RATE) d1->sampling_rate = s1->sampling_rate;
  if (w2 & COUNT_SAMPLING_DIRECTION) memcpy(&d1->sampling_direction, &s1->sampling_direction, sizeof(d1->sampling_direction));

  if (extras->off_pkt_bgp_primitives && s2) {
    if (w & COUNT_LOCAL_PREF) d2->local_pref = s2->local_pref;
    if (w & COUNT_SRC_LOCAL_PREF) d2->src_local_pref = s2->src_local_pref;
    if (w & COUNT_MED) d2->med = s2->med;
    if (w & COUNT_SRC_MED) d2->src_med = s2->src_med;
    if (w2 & COUNT_DST_ROA) d2->dst_roa = s2->dst_roa;
    if (w2 & COUNT_SRC_ROA) d2->src_roa = s2->src_roa;
    if (w & COUNT_PEER_SRC_AS) d2->peer_src_as = s2->peer_src_as;
    if (w & COUNT_PEER_DST_AS) d2->peer_dst_as = s2->peer_dst_as;
    if (w & COUNT_PEER_SRC_IP) memcpy(&d2->peer_src_ip, &s2->peer_src_ip, sizeof(d2->peer_src_ip));
    if (w & COUNT_PEER_DST_IP) memcpy(&d2->peer_dst_ip, &s2->peer_dst_ip, sizeof(d2->peer_dst_ip));
    if (w & COUNT_MPLS_VPN_RD) memcpy(&d2->mpls_vpn_rd, &s2->mpls_vpn_rd, sizeof(rd_t));
    if (w2 & COUNT_MPLS_PW_ID) memcpy(&d2->mpls_pw_id, &s2->mpls_pw_id, sizeof(d2->mpls_pw_id));
  }

  if (extras->off_pkt_lbgp_primitives && s5) {
    if (w & COUNT_STD_COMM) strlcpy(d5->std_comms, s5->std_comms, MAX_BGP_STD_COMMS);
    if (w & COUNT_EXT_COMM) strlcpy(d5->ext_comms, s5->ext_comms, MAX_BGP_EXT_COMMS);
    if (w2 & COUNT_LRG_COMM) strlcpy(d5->lrg_comms, s5->lrg_comms, MAX_BGP_LRG_COMMS);
    if (w & COUNT_AS_PATH) strlcpy(d5->as_path, s5->as_path, MAX_BGP_ASPATH);
    if (w & COUNT_SRC_STD_COMM) strlcpy(d5->src_std_comms, s5->src_std_comms, MAX_BGP_STD_COMMS);
    if (w & COUNT_SRC_EXT_COMM) strlcpy(d5->src_ext_comms, s5->src_ext_comms, MAX_BGP_EXT_COMMS);
    if (w2 & COUNT_SRC_LRG_COMM) strlcpy(d5->src_lrg_comms, s5->src_lrg_comms, MAX_BGP_LRG_COMMS);
    if (w & COUNT_SRC_AS_PATH) strlcpy(d5->src_as_path, s5->src_as_path, MAX_BGP_ASPATH);
  }

  if (extras->off_pkt_nat_primitives && s3) {
    if (w2 & COUNT_POST_NAT_SRC_HOST) memcpy(&d3->post_nat_src_ip, &s3->post_nat_src_ip, sizeof(d3->post_nat_src_ip));
    if (w2 & COUNT_POST_NAT_DST_HOST) memcpy(&d3->post_nat_dst_ip, &s3->post_nat_dst_ip, sizeof(d3->post_nat_dst_ip));
    if (w2 & COUNT_POST_NAT_SRC_PORT) d3->post_nat_src_port = s3->post_nat_src_port;
    if (w2 & COUNT_POST_NAT_DST_PORT) d3->post_nat_dst_port = s3->post_nat_dst_port;
    if (w2 & COUNT_NAT_EVENT) d3->nat_event = s3->nat_event;
    if (w2 & COUNT_TIMESTAMP_START) memcpy(&d3->timestamp_start, &s3->timestamp_start, sizeof(struct timeval));
    if (w2 & COUNT_TIMESTAMP_END) memcpy(&d3->timestamp_end, &s3->timestamp_end, sizeof(struct timeval));
    if (w2 & COUNT_TIMESTAMP_ARRIVAL) memcpy(&d3->timestamp_arrival, &s3->timestamp_arrival, sizeof(struct timeval));
  }

  if (extras->off_pkt_mpls_primitives && s4) {
    if (w2 & COUNT_MPLS_LABEL_TOP) d4->mpls_label_top = s4->mpls_label_top;
    if (w2 & COUNT_MPLS_LABEL_BOTTOM) d4->mpls_label_bottom = s4->mpls_label_bottom;
    if (w2 & COUNT_MPLS_STACK_DEPTH) d4->mpls_stack_depth = s4->mpls_stack_depth;
  }

  if (extras->off_pkt_tun_primitives && s6) {
    if (w2 & COUNT_TUNNEL_SRC_MAC) memcpy(&d6->tunnel_eth_shost, &s6->tunnel_eth_shost, sizeof(d6->tunnel_eth_shost));
    if (w2 & COUNT_TUNNEL_DST_MAC) memcpy(&d6->tunnel_eth_dhost, &s6->tunnel_eth_dhost, sizeof(d6->tunnel_eth_dhost));
    if (w2 & COUNT_TUNNEL_SRC_HOST) memcpy(&d6->tunnel_src_ip, &s6->tunnel_src_ip, sizeof(d6->tunnel_src_ip));
    if (w2 & COUNT_TUNNEL_DST_HOST) memcpy(&d6->tunnel_dst_ip, &s6->tunnel_dst_ip, sizeof(d6->tunnel_dst_ip));
    if (w2 & COUNT_TUNNEL_IP_PROTO) memcpy(&d6->tunnel_proto, &s6->tunnel_proto, sizeof(d6->tunnel_proto));
    if (w2 & COUNT_TUNNEL_IP_TOS) memcpy(&d6->tunnel_tos, &s6->tunnel_tos, sizeof(d6->tunnel_tos));
    if (w2 & COUNT_TUNNEL_SRC_PORT) memcpy(&d6->tunnel_src_port, &s6->tunnel_src_port, sizeof(d6->tunnel_src_port));
    if (w2 & COUNT_TUNNEL_DST_PORT) memcpy(&d6->tunnel_dst_port, &s6->tunnel_dst_port, sizeof(d6->tunnel_dst_port));
    if (w2 & COUNT_VXLAN) memcpy(&d6->tunnel_id, &s6->tunnel_id, sizeof(d6->tunnel_id));
  }
}

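/*
   enQueue_elem() packs 'size' bytes of 'elem' into the reply buffer; if the
   buffer cannot take 'tot_size' more bytes, it is first flushed to the
   client and then reused.
*/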
void enQueue_elem(int sd, struct reply_buffer *rb, void *elem, int size, int tot_size)
{
  if ((rb->packed + tot_size) < rb->len) {
    memcpy(rb->ptr, elem, size);
    rb->ptr += size;
    rb->packed += size;
  }
  else {
    send(sd, rb->buf, rb->packed, 0);
    rb->len = LARGEBUFLEN;
    memset(rb->buf, 0, sizeof(rb->buf));
    rb->packed = 0;
    rb->ptr = rb->buf;
    memcpy(rb->ptr, elem, size);
    rb->ptr += size;
    rb->packed += size;
  }
}

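/*
   Accumulate_Counters() sums the byte, packet and flow counters of 'elem'
   into the reply record used to answer WANT_COUNTER queries.
*/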
void Accumulate_Counters(struct pkt_data *abuf, struct acc *elem)
{
  abuf->pkt_len += elem->bytes_counter;
  abuf->pkt_num += elem->packet_counter;
  abuf->flo_num += elem->flow_counter;
  abuf->time_start.tv_sec++; /* XXX: this otherwise unused field works as a counter of how many entries we are accumulating */
}

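/*
   test_zero_elem() returns FALSE (ie. the entry is in use) for entries that
   carry a flow type and are not flagged for reset; everything else is
   treated as empty.
*/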
int test_zero_elem(struct acc *elem)
{
  if (elem && elem->flow_type && !elem->reset_flag) return FALSE;

/*
  if (elem) {
    if (elem->flow_type == NF9_FTYPE_NAT_EVENT) {
      if (elem->pnat && elem->pnat->nat_event) return FALSE;
      else return TRUE;
    }
    else {
      if (elem->bytes_counter && !elem->reset_flag) return FALSE;
      else return TRUE;
    }
  }
*/

  return TRUE;
}