1 /*
2 pmacct (Promiscuous mode IP Accounting package)
3 pmacct is Copyright (C) 2003-2020 by Paolo Lucente
4 */
5
6 /*
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22 /* includes */
23 #include "pmacct.h"
24 #include "addr.h"
25 #include "pmacct-data.h"
26 #include "pmacct-dlt.h"
27 #include "pretag_handlers.h"
28 #include "plugin_hooks.h"
29 #include "pkt_handlers.h"
30 #include "ip_frag.h"
31 #include "ip_flow.h"
32 #include "net_aggr.h"
33 #include "thread_pool.h"
34 #include "isis/isis.h"
35 #include "bgp/bgp.h"
36 #include "bmp/bmp.h"
37 #if defined (WITH_NDPI)
38 #include "ndpi/ndpi.h"
39 #endif
40
/* Registry of known tunnel decapsulation handlers. Each entry binds a
   tunnel type keyword (as referenced by the tunnel_0 config directive,
   see tunnel_registry_init()) to its packet handler (tf) and its
   configurator (tc). The empty-name entry terminates the list. */
struct tunnel_entry tunnel_handlers_list[] = {
  {"gtp", gtp_tunnel_func, gtp_tunnel_configurator},
  {"", NULL, NULL},
};
45
/* Libpcap capture callback: dissects one packet, resolves its direction
   and input/output ifindexes, performs the configured lookups (nDPI
   classification, ISIS, BGP, BMP, pre-tag maps) and hands the packet to
   the active plugins; also services pending map and log reload requests.
   'user' is a struct pm_pcap_callback_data describing the capturing device. */
void pm_pcap_cb(u_char *user, const struct pcap_pkthdr *pkthdr, const u_char *buf)
{
  struct packet_ptrs pptrs;
  struct pm_pcap_callback_data *cb_data = (struct pm_pcap_callback_data *) user;
  struct pm_pcap_device *device;
  struct plugin_requests req;
  u_int32_t iface32 = 0;
  u_int32_t ifacePresent = 0;

  assert(cb_data); /* validate before any dereference (was checked after use) */
  device = cb_data->device;

  memset(&req, 0, sizeof(req));

  /* zeroed unconditionally: the cleanup below reads pptrs.tun_pptrs even
     when buf is NULL (the original left pptrs uninitialized in that case) */
  memset(&pptrs, 0, sizeof(pptrs));

  /* hold signals while we work on the packet */
  if (cb_data->sig.is_set) sigprocmask(SIG_BLOCK, &cb_data->sig.set, NULL);

  /* We process the packet with the appropriate
     data link layer function */
  if (buf) {
    pptrs.pkthdr = (struct pcap_pkthdr *) pkthdr;
    pptrs.packet_ptr = (u_char *) buf;
    pptrs.f_agent = cb_data->f_agent;
    pptrs.bpas_table = cb_data->bpas_table;
    pptrs.blp_table = cb_data->blp_table;
    pptrs.bmed_table = cb_data->bmed_table;
    pptrs.bta_table = cb_data->bta_table;
    pptrs.flow_type = NF9_FTYPE_TRAFFIC;

    /* tunnel primitives: allocate a shadow packet_ptrs for the inner packet */
    if (cb_data->has_tun_prims) {
      struct packet_ptrs *tpptrs;

      pptrs.tun_pptrs = malloc(sizeof(struct packet_ptrs));
      if (pptrs.tun_pptrs) {
        memset(pptrs.tun_pptrs, 0, sizeof(struct packet_ptrs));
        tpptrs = (struct packet_ptrs *) pptrs.tun_pptrs;

        tpptrs->pkthdr = malloc(sizeof(struct pcap_pkthdr));
        if (tpptrs->pkthdr) {
          /* bugfix: copy the pcap header contents; the original code
             memcpy()'d over the pointer itself (&tpptrs->pkthdr),
             clobbering it and leaking the freshly allocated header */
          memcpy(tpptrs->pkthdr, pptrs.pkthdr, sizeof(struct pcap_pkthdr));

          tpptrs->packet_ptr = (u_char *) buf;
          tpptrs->flow_type = NF9_FTYPE_TRAFFIC;
        }
        else {
          /* could not complete the shadow structure: back out cleanly */
          free(pptrs.tun_pptrs);
          pptrs.tun_pptrs = NULL;
        }
      }
    }

    /* direction: per-interface setting wins over the global one */
    if (cb_data->device &&
        cb_data->device->pcap_if &&
        cb_data->device->pcap_if->direction) {
      pptrs.direction = cb_data->device->pcap_if->direction;
    }
    else if (config.pcap_direction) {
      pptrs.direction = config.pcap_direction;
    }
    else pptrs.direction = FALSE;

    /* input interface */
    if (cb_data->ifindex_in) {
      pptrs.ifindex_in = cb_data->ifindex_in;
    }
    else if (cb_data->device &&
             cb_data->device->id &&
             cb_data->device->pcap_if &&
             cb_data->device->pcap_if->direction) {
      if (cb_data->device->pcap_if->direction == PCAP_D_IN) {
        pptrs.ifindex_in = cb_data->device->id;
      }
    }
    else if (cb_data->device && /* guard added: device may be NULL here */
             cb_data->device->id &&
             config.pcap_direction == PCAP_D_IN) {
      pptrs.ifindex_in = cb_data->device->id;
    }
    else pptrs.ifindex_in = 0;

    /* output interface */
    if (cb_data->ifindex_out) {
      pptrs.ifindex_out = cb_data->ifindex_out;
    }
    else if (cb_data->device &&
             cb_data->device->id &&
             cb_data->device->pcap_if &&
             cb_data->device->pcap_if->direction) {
      if (cb_data->device->pcap_if->direction == PCAP_D_OUT) {
        pptrs.ifindex_out = cb_data->device->id;
      }
    }
    else if (cb_data->device && /* guard added: device may be NULL here */
             cb_data->device->id &&
             config.pcap_direction == PCAP_D_OUT) {
      pptrs.ifindex_out = cb_data->device->id;
    }
    else pptrs.ifindex_out = 0;

    if (config.decode_arista_trailer) {
      /* the trailer sits in the last 8 bytes of the frame; only decode it
         when those bytes were actually captured (prevents an out-of-bounds
         read on truncated/short captures) */
      if (pkthdr->len >= 8 && pkthdr->caplen >= pkthdr->len) {
        memcpy(&ifacePresent, buf + pkthdr->len - 8, 4);
        if (ifacePresent == 1) {
          memcpy(&iface32, buf + pkthdr->len - 4, 4);
          pptrs.ifindex_out = iface32;
        }
      }
    }

    /* link layer, then network layer dissection */
    (*device->data->handler)(pkthdr, &pptrs);
    if (pptrs.iph_ptr) {
      if ((*pptrs.l3_handler)(&pptrs)) {

#if defined (WITH_NDPI)
        if (config.classifier_ndpi && pm_ndpi_wfl) {
          pptrs.ndpi_class = pm_ndpi_workflow_process_packet(pm_ndpi_wfl, &pptrs);
        }
#endif

        if (config.nfacctd_isis) {
          isis_srcdst_lookup(&pptrs);
        }
        if (config.bgp_daemon) {
          BTA_find_id((struct id_table *)pptrs.bta_table, &pptrs, &pptrs.bta, &pptrs.bta2);
          bgp_srcdst_lookup(&pptrs, FUNC_TYPE_BGP);
        }
        if (config.bgp_daemon_peer_as_src_map) PM_find_id((struct id_table *)pptrs.bpas_table, &pptrs, &pptrs.bpas, NULL);
        if (config.bgp_daemon_src_local_pref_map) PM_find_id((struct id_table *)pptrs.blp_table, &pptrs, &pptrs.blp, NULL);
        if (config.bgp_daemon_src_med_map) PM_find_id((struct id_table *)pptrs.bmed_table, &pptrs, &pptrs.bmed, NULL);
        if (config.bmp_daemon) {
          BTA_find_id((struct id_table *)pptrs.bta_table, &pptrs, &pptrs.bta, &pptrs.bta2);
          bmp_srcdst_lookup(&pptrs);
        }

        set_index_pkt_ptrs(&pptrs);
        exec_plugins(&pptrs, &req);
      }
    }
  }

  /* service a pending maps reload request (signal-driven) */
  if (reload_map) {
    bta_map_caching = FALSE;
    sampling_map_caching = FALSE;

    load_networks(config.networks_file, &nt, &nc);

    if (config.bgp_daemon && config.bgp_daemon_peer_as_src_map)
      load_id_file(MAP_BGP_PEER_AS_SRC, config.bgp_daemon_peer_as_src_map, (struct id_table *)cb_data->bpas_table, &req, &bpas_map_allocated);
    if (config.bgp_daemon && config.bgp_daemon_src_local_pref_map)
      load_id_file(MAP_BGP_SRC_LOCAL_PREF, config.bgp_daemon_src_local_pref_map, (struct id_table *)cb_data->blp_table, &req, &blp_map_allocated);
    if (config.bgp_daemon && config.bgp_daemon_src_med_map)
      load_id_file(MAP_BGP_SRC_MED, config.bgp_daemon_src_med_map, (struct id_table *)cb_data->bmed_table, &req, &bmed_map_allocated);
    if (config.bgp_daemon)
      load_id_file(MAP_BGP_TO_XFLOW_AGENT, config.bgp_daemon_to_xflow_agent_map, (struct id_table *)cb_data->bta_table, &req, &bta_map_allocated);

    reload_map = FALSE;
    gettimeofday(&reload_map_tstamp, NULL);
  }

  /* service a pending log rotation request */
  if (reload_log) {
    reload_logs();
    reload_log = FALSE;
  }

  /* release the shadow packet_ptrs allocated for tunnel primitives */
  if (cb_data->has_tun_prims && pptrs.tun_pptrs) {
    struct packet_ptrs *tpptrs = (struct packet_ptrs *) pptrs.tun_pptrs;

    if (tpptrs->pkthdr) free(tpptrs->pkthdr);
    free(pptrs.tun_pptrs);
  }

  if (cb_data->sig.is_set) sigprocmask(SIG_UNBLOCK, &cb_data->sig.set, NULL);
}
207
/* IPv4 dissector: validates the IP header against the captured length,
   locates the transport header and the payload, optionally handles
   fragments and flow accounting, extracts TCP flags, decodes VXLAN
   payloads and runs any configured tunnel handlers.
   Returns TRUE when the packet should be further processed, FALSE when
   it must be discarded (truncated packet, unreassembled fragment). */
int ip_handler(register struct packet_ptrs *pptrs)
{
  register u_int8_t len = 0;
  register u_int16_t caplen = ((struct pcap_pkthdr *)pptrs->pkthdr)->caplen;
  register unsigned char *ptr;
  register u_int16_t off = pptrs->iph_ptr-pptrs->packet_ptr, off_l4;
  int ret = TRUE, num, is_fragment = 0;

  /* len: number of 32bit words forming the header */
  len = IP_HL(((struct pm_iphdr *) pptrs->iph_ptr));
  len <<= 2; /* words -> bytes; max 15*4 = 60, fits u_int8_t */
  ptr = pptrs->iph_ptr+len;
  off += len;

  /* check len */
  if (off > caplen) return FALSE; /* IP packet truncated */
  pptrs->l4_proto = ((struct pm_iphdr *)pptrs->iph_ptr)->ip_p;
  pptrs->payload_ptr = NULL;
  off_l4 = off; /* remember the L4 header offset for the TCP-flags reads below */

  /* check fragments if needed */
  if (config.handle_fragments) {
    if (pptrs->l4_proto == IPPROTO_TCP || pptrs->l4_proto == IPPROTO_UDP) {
      if (off+MyTLHdrSz > caplen) {
	Log(LOG_INFO, "INFO ( %s/core ): short IPv4 packet read (%u/%u/frags). Snaplen issue ?\n",
			config.name, caplen, off+MyTLHdrSz);
	return FALSE;
      }
      pptrs->tlh_ptr = ptr;

      /* MF flag or a non-zero offset: this is (part of) a fragment */
      if (((struct pm_iphdr *)pptrs->iph_ptr)->ip_off & htons(IP_MF|IP_OFFMASK)) {
	is_fragment = TRUE;
	ret = ip_fragment_handler(pptrs);
	if (!ret) {
	  if (!config.ext_sampling_rate) goto quit;
	  else {
	    /* sampling in use: account the fragment anyway with a dummy
	       transport header, since reassembly state may be incomplete */
	    pptrs->tlh_ptr = dummy_tlhdr;
	    pptrs->tcp_flags = FALSE;
	    if (off < caplen) pptrs->payload_ptr = ptr;
	    ret = TRUE;
	    goto quit;
	  }
	}
      }

      /* Let's handle both fragments and packets. If we are facing any subsequent frag
	 our pointer is in place; we handle unknown L4 protocols likewise. In case of
	 "entire" TCP/UDP packets we have to jump the L4 header instead */
      if (((struct pm_iphdr *)pptrs->iph_ptr)->ip_off & htons(IP_OFFMASK)); /* subsequent fragment: nothing to jump */
      else if (pptrs->l4_proto == IPPROTO_UDP) {
	ptr += UDPHdrSz;
	off += UDPHdrSz;
      }
      else if (pptrs->l4_proto == IPPROTO_TCP) {
	/* th_off: TCP header length in 32-bit words */
	ptr += ((struct pm_tcphdr *)pptrs->tlh_ptr)->th_off << 2;
	off += ((struct pm_tcphdr *)pptrs->tlh_ptr)->th_off << 2;
      }

      if (off < caplen) {
	pptrs->payload_ptr = ptr;

	/* VXLAN decapsulation: expose the VNI and, when tunnel primitives
	   are enabled, dissect the inner Ethernet frame into tun_pptrs */
	if (pptrs->l4_proto == IPPROTO_UDP) {
	  u_int16_t dst_port = ntohs(((struct pm_udphdr *)pptrs->tlh_ptr)->uh_dport);

	  if (dst_port == UDP_PORT_VXLAN && (off + sizeof(struct vxlan_hdr) <= caplen)) {
	    struct vxlan_hdr *vxhdr = (struct vxlan_hdr *) pptrs->payload_ptr;

	    if (vxhdr->flags & VXLAN_FLAG_I) pptrs->vxlan_ptr = vxhdr->vni;
	    pptrs->payload_ptr += sizeof(struct vxlan_hdr);

	    if (pptrs->tun_pptrs) {
	      struct packet_ptrs *tpptrs = (struct packet_ptrs *) pptrs->tun_pptrs;

	      tpptrs->pkthdr->caplen = (pptrs->pkthdr->caplen - (pptrs->payload_ptr - pptrs->packet_ptr));
	      tpptrs->packet_ptr = pptrs->payload_ptr;

	      eth_handler(tpptrs->pkthdr, tpptrs);
	      if (tpptrs->iph_ptr) ((*tpptrs->l3_handler)(tpptrs));
	    }
	  }
	}
      }
    }
    else {
      /* non-TCP/UDP: use a dummy transport header */
      pptrs->tlh_ptr = dummy_tlhdr;
      if (off < caplen) pptrs->payload_ptr = ptr;
    }

    if (config.handle_flows) {
      pptrs->tcp_flags = FALSE;

      if (pptrs->l4_proto == IPPROTO_TCP) {
	if (off_l4+TCPFlagOff+1 > caplen) {
	  Log(LOG_INFO, "INFO ( %s/core ): short IPv4 packet read (%u/%u/flows). Snaplen issue ?\n",
			  config.name, caplen, off_l4+TCPFlagOff+1);
	  return FALSE;
	}
	/* flow accounting tracks SYN/FIN/RST; ACK only alongside another flag */
	if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_SYN) pptrs->tcp_flags |= TH_SYN;
	if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_FIN) pptrs->tcp_flags |= TH_FIN;
	if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_RST) pptrs->tcp_flags |= TH_RST;
	if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_ACK && pptrs->tcp_flags) pptrs->tcp_flags |= TH_ACK;
      }

      ip_flow_handler(pptrs);
    }

    /* XXX: optimize/short circuit here! */
    pptrs->tcp_flags = FALSE;
    if (pptrs->l4_proto == IPPROTO_TCP && off_l4+TCPFlagOff+1 <= caplen)
      pptrs->tcp_flags = ((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags;

    /* tunnel handlers here */
    if (config.tunnel0 && !pptrs->tun_stack) {
      /* first decapsulation layer: scan registry row 0 for a proto/port match */
      for (num = 0; pptrs->payload_ptr && !is_fragment && tunnel_registry[0][num].tf; num++) {
	if (tunnel_registry[0][num].proto == pptrs->l4_proto) {
	  if (!tunnel_registry[0][num].port || (pptrs->tlh_ptr && tunnel_registry[0][num].port == ntohs(((struct pm_tlhdr *)pptrs->tlh_ptr)->dst_port))) {
	    pptrs->tun_stack = num;
	    ret = (*tunnel_registry[0][num].tf)(pptrs);
	  }
	}
      }
    }
    else if (pptrs->tun_stack) {
      /* nested decapsulation: stay on the stack selected by the outer layer */
      if (tunnel_registry[pptrs->tun_stack][pptrs->tun_layer].proto == pptrs->l4_proto) {
	if (!tunnel_registry[pptrs->tun_stack][pptrs->tun_layer].port || (pptrs->tlh_ptr && tunnel_registry[pptrs->tun_stack][pptrs->tun_layer].port == ntohs(((struct pm_tlhdr *)pptrs->tlh_ptr)->dst_port))) {
	  ret = (*tunnel_registry[pptrs->tun_stack][pptrs->tun_layer].tf)(pptrs);
	}
      }
    }
  }

  quit:
  return ret;
}
342
ip6_handler(register struct packet_ptrs * pptrs)343 int ip6_handler(register struct packet_ptrs *pptrs)
344 {
345 struct ip6_frag *fhdr = NULL;
346 register u_int16_t caplen = ((struct pcap_pkthdr *)pptrs->pkthdr)->caplen;
347 u_int16_t plen = ntohs(((struct ip6_hdr *)pptrs->iph_ptr)->ip6_plen);
348 u_int16_t off = pptrs->iph_ptr-pptrs->packet_ptr, off_l4;
349 u_int32_t advance;
350 u_int8_t nh;
351 u_char *ptr = pptrs->iph_ptr;
352 int ret = TRUE;
353
354 /* length checks */
355 if (off+IP6HdrSz > caplen) return FALSE; /* IP packet truncated */
356 if (plen == 0 && ((struct ip6_hdr *)pptrs->iph_ptr)->ip6_nxt != IPPROTO_NONE) {
357 Log(LOG_INFO, "INFO ( %s/core ): NULL IPv6 payload length. Jumbo packets are currently not supported.\n", config.name);
358 return FALSE;
359 }
360
361 pptrs->l4_proto = 0;
362 pptrs->payload_ptr = NULL;
363 nh = ((struct ip6_hdr *)pptrs->iph_ptr)->ip6_nxt;
364 advance = IP6HdrSz;
365
366 while ((off+advance <= caplen) && advance) {
367 off += advance;
368 ptr += advance;
369
370 switch(nh) {
371 case IPPROTO_HOPOPTS:
372 case IPPROTO_DSTOPTS:
373 case IPPROTO_ROUTING:
374 case IPPROTO_MOBILITY:
375 nh = ((struct ip6_ext *)ptr)->ip6e_nxt;
376 advance = (((struct ip6_ext *)ptr)->ip6e_len + 1) << 3;
377 break;
378 case IPPROTO_AH:
379 nh = ((struct ip6_ext *)ptr)->ip6e_nxt;
380 advance = sizeof(struct ah)+(((struct ah *)ptr)->ah_len << 2); /* hdr + sumlen */
381 break;
382 case IPPROTO_FRAGMENT:
383 fhdr = (struct ip6_frag *) ptr;
384 nh = ((struct ip6_ext *)ptr)->ip6e_nxt;
385 advance = sizeof(struct ip6_frag);
386 break;
387 /* XXX: case IPPROTO_ESP: */
388 /* XXX: case IPPROTO_IPCOMP: */
389 default:
390 pptrs->tlh_ptr = ptr;
391 pptrs->l4_proto = nh;
392 goto end;
393 }
394 }
395
396 end:
397
398 off_l4 = off;
399 if (config.handle_fragments) {
400 if (pptrs->l4_proto == IPPROTO_TCP || pptrs->l4_proto == IPPROTO_UDP) {
401 if (off+MyTLHdrSz > caplen) {
402 Log(LOG_INFO, "INFO ( %s/core ): short IPv6 packet read (%u/%u/frags). Snaplen issue ?\n",
403 config.name, caplen, off+MyTLHdrSz);
404 return FALSE;
405 }
406
407 if (fhdr && (fhdr->ip6f_offlg & htons(IP6F_MORE_FRAG|IP6F_OFF_MASK))) {
408 ret = ip6_fragment_handler(pptrs, fhdr);
409 if (!ret) {
410 if (!config.ext_sampling_rate) goto quit;
411 else {
412 pptrs->tlh_ptr = dummy_tlhdr;
413 pptrs->tcp_flags = FALSE;
414 if (off < caplen) pptrs->payload_ptr = ptr;
415 ret = TRUE;
416 goto quit;
417 }
418 }
419 }
420
421 /* Let's handle both fragments and packets. If we are facing any subsequent frag
422 our pointer is in place; we handle unknown L4 protocols likewise. In case of
423 "entire" TCP/UDP packets we have to jump the L4 header instead */
424 if (fhdr && (fhdr->ip6f_offlg & htons(IP6F_OFF_MASK)));
425 else if (pptrs->l4_proto == IPPROTO_UDP) {
426 ptr += UDPHdrSz;
427 off += UDPHdrSz;
428 }
429 else if (pptrs->l4_proto == IPPROTO_TCP) {
430 ptr += ((struct pm_tcphdr *)pptrs->tlh_ptr)->th_off << 2;
431 off += ((struct pm_tcphdr *)pptrs->tlh_ptr)->th_off << 2;
432 }
433
434 if (off < caplen) {
435 pptrs->payload_ptr = ptr;
436
437 if (pptrs->l4_proto == IPPROTO_UDP) {
438 u_int16_t dst_port = ntohs(((struct pm_udphdr *)pptrs->tlh_ptr)->uh_dport);
439
440 if (dst_port == UDP_PORT_VXLAN && (off + sizeof(struct vxlan_hdr) <= caplen)) {
441 struct vxlan_hdr *vxhdr = (struct vxlan_hdr *) pptrs->payload_ptr;
442
443 if (vxhdr->flags & VXLAN_FLAG_I) pptrs->vxlan_ptr = vxhdr->vni;
444 pptrs->payload_ptr += sizeof(struct vxlan_hdr);
445
446 if (pptrs->tun_pptrs) {
447 struct packet_ptrs *tpptrs = (struct packet_ptrs *) pptrs->tun_pptrs;
448
449 tpptrs->pkthdr->caplen = (pptrs->pkthdr->caplen - (pptrs->payload_ptr - pptrs->packet_ptr));
450 tpptrs->packet_ptr = pptrs->payload_ptr;
451
452 eth_handler(tpptrs->pkthdr, tpptrs);
453 if (tpptrs->iph_ptr) ((*tpptrs->l3_handler)(tpptrs));
454 }
455 }
456 }
457 }
458 }
459 else {
460 pptrs->tlh_ptr = dummy_tlhdr;
461 if (off < caplen) pptrs->payload_ptr = ptr;
462 }
463
464 if (config.handle_flows) {
465 pptrs->tcp_flags = FALSE;
466
467 if (pptrs->l4_proto == IPPROTO_TCP) {
468 if (off_l4+TCPFlagOff+1 > caplen) {
469 Log(LOG_INFO, "INFO ( %s/core ): short IPv6 packet read (%u/%u/flows). Snaplen issue ?\n",
470 config.name, caplen, off_l4+TCPFlagOff+1);
471 return FALSE;
472 }
473 if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_SYN) pptrs->tcp_flags |= TH_SYN;
474 if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_FIN) pptrs->tcp_flags |= TH_FIN;
475 if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_RST) pptrs->tcp_flags |= TH_RST;
476 if (((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags & TH_ACK && pptrs->tcp_flags) pptrs->tcp_flags |= TH_ACK;
477 }
478
479 ip_flow6_handler(pptrs);
480 }
481
482 /* XXX: optimize/short circuit here! */
483 pptrs->tcp_flags = FALSE;
484 if (pptrs->l4_proto == IPPROTO_TCP && off_l4+TCPFlagOff+1 <= caplen)
485 pptrs->tcp_flags = ((struct pm_tcphdr *)pptrs->tlh_ptr)->th_flags;
486 }
487
488 quit:
489 return TRUE;
490 }
491
/* Looks up 'pptrs' against pre-tag map 't', filling 'tag'/'tag2'.
   When map indexing is enabled it tries the index lookup first and
   trusts its outcome; otherwise it scans the table sequentially,
   honouring JEQ jumps. Returns the last pretag_entry_process() rcode
   (0 when no entry matched). */
int PM_find_id(struct id_table *t, struct packet_ptrs *pptrs, pm_id_t *tag, pm_id_t *tag2)
{
  int x;
  pm_id_t ret = 0;

  if (!t) return 0;

  pretag_init_vars(pptrs, t);
  if (tag) *tag = 0;
  if (tag2) *tag2 = 0;
  if (pptrs) {
    pptrs->have_tag = FALSE;
    pptrs->have_tag2 = FALSE;
  }

  /* Giving a first try with index(es) */
  if (config.maps_index && pretag_index_have_one(t)) {
    struct id_entry *index_results[ID_TABLE_INDEX_RESULTS];
    int iterator, num_results;

    num_results = pretag_index_lookup(t, pptrs, index_results, ID_TABLE_INDEX_RESULTS);

    /* bugfix: bounds check must come first. The original evaluated
       index_results[iterator] before testing iterator < num_results,
       reading entries past the filled portion of the array (and, when
       num_results == ID_TABLE_INDEX_RESULTS, one element past its end).
       'iterator' is now int to avoid a signed/unsigned compare should
       the lookup ever return a negative value. */
    for (iterator = 0; iterator < num_results && index_results[iterator]; iterator++) {
      ret = pretag_entry_process(index_results[iterator], pptrs, tag, tag2);
      if (!(ret & PRETAG_MAP_RCODE_JEQ)) return ret;
    }

    /* if we have at least one index we trust we did a good job */
    return ret;
  }

  /* sequential scan fallback */
  for (x = 0; x < t->ipv4_num; x++) {
    ret = pretag_entry_process(&t->e[x], pptrs, tag, tag2);

    if (!ret || ret > TRUE) {
      if (ret & PRETAG_MAP_RCODE_JEQ) {
	x = t->e[x].jeq.ptr->pos;
	x--; /* yes, it will be automagically incremented by the for() cycle */
      }
      else break;
    }
  }

  return ret;
}
538
PM_print_stats(time_t now)539 void PM_print_stats(time_t now)
540 {
541 int device_idx;
542
543 Log(LOG_NOTICE, "NOTICE ( %s/%s ): +++\n", config.name, config.type);
544
545 if (config.pcap_if || config.pcap_interfaces_map) {
546 for (device_idx = 0; device_idx < devices.num; device_idx++) {
547 if (pcap_stats(devices.list[device_idx].dev_desc, &ps) < 0) {
548 Log(LOG_INFO, "INFO ( %s/%s ): stats [%s,%u] time=%ld error='pcap_stats(): %s'\n",
549 config.name, config.type, devices.list[device_idx].str, devices.list[device_idx].id,
550 (long)now, pcap_geterr(devices.list[device_idx].dev_desc));
551 }
552
553 Log(LOG_NOTICE, "NOTICE ( %s/%s ): stats [%s,%u] time=%ld received_packets=%u dropped_packets=%u\n",
554 config.name, config.type, devices.list[device_idx].str, devices.list[device_idx].id,
555 (long)now, ps.ps_recv, ps.ps_drop);
556 }
557 }
558
559 Log(LOG_NOTICE, "NOTICE ( %s/%s ): ---\n", config.name, config.type);
560 }
561
/* Computes, once at startup, the sizes of the structures and protocol
   headers used throughout the packet processing path, caching them in
   the corresponding globals so the fast path avoids repeated sizeof
   evaluation. NOTE: MyTCPHdrSz depends on TCPFlagOff being set first,
   so the assignment order below matters. */
void compute_once()
{
  struct pkt_data dummy; /* only used to size the pkt_len field */

  CounterSz = sizeof(dummy.pkt_len);
  PdataSz = sizeof(struct pkt_data);
  PpayloadSz = sizeof(struct pkt_payload);
  PextrasSz = sizeof(struct pkt_extras);
  PbgpSz = sizeof(struct pkt_bgp_primitives);
  PlbgpSz = sizeof(struct pkt_legacy_bgp_primitives);
  PnatSz = sizeof(struct pkt_nat_primitives);
  PmplsSz = sizeof(struct pkt_mpls_primitives);
  PtunSz = sizeof(struct pkt_tunnel_primitives);
  PvhdrSz = sizeof(struct pkt_vlen_hdr_primitives);
  PmLabelTSz = sizeof(pm_label_t);
  PtLabelTSz = sizeof(pt_label_t);
  ChBufHdrSz = sizeof(struct ch_buf_hdr);
  CharPtrSz = sizeof(char *);
  IP4HdrSz = sizeof(struct pm_iphdr);
  MyTLHdrSz = sizeof(struct pm_tlhdr);
  TCPFlagOff = 13; /* byte offset of the flags field within the TCP header */
  MyTCPHdrSz = TCPFlagOff+1; /* minimum bytes needed to read the TCP flags */
  PptrsSz = sizeof(struct packet_ptrs);
  UDPHdrSz = 8; /* fixed UDP header length (RFC 768) */
  CSSz = sizeof(struct class_st);
  IpFlowCmnSz = sizeof(struct ip_flow_common);
  HostAddrSz = sizeof(struct host_addr);
  IP6HdrSz = sizeof(struct ip6_hdr);
  IP6AddrSz = sizeof(struct in6_addr);
}
592
tunnel_registry_init()593 void tunnel_registry_init()
594 {
595 if (config.tunnel0) {
596 char *tun_string = config.tunnel0, *tun_entry = NULL, *tun_type = NULL;
597 int th_index = 0 /* tunnel handler index */, tr_index = 0 /* tunnel registry index */;
598
599 while ((tun_entry = extract_token(&tun_string, ';'))) {
600 tun_type = extract_token(&tun_entry, ',');
601
602 for (th_index = 0; strcmp(tunnel_handlers_list[th_index].type, ""); th_index++) {
603 if (!strcmp(tunnel_handlers_list[th_index].type, tun_type)) {
604 if (tr_index < TUNNEL_REGISTRY_ENTRIES) {
605 (*tunnel_handlers_list[th_index].tc)(&tunnel_registry[0][tr_index], tun_entry);
606 tr_index++;
607 }
608 break;
609 }
610 }
611 }
612 }
613 }
614
gtp_tunnel_configurator(struct tunnel_handler * th,char * opts)615 int gtp_tunnel_configurator(struct tunnel_handler *th, char *opts)
616 {
617 th->proto = IPPROTO_UDP;
618 th->port = atoi(opts);
619
620 if (th->port) {
621 th->tf = gtp_tunnel_func;
622 }
623 else {
624 th->tf = NULL;
625 Log(LOG_WARNING, "WARN ( %s/core ): GTP tunnel handler not loaded due to invalid options: '%s'\n", config.name, opts);
626 }
627
628 return 0;
629 }
630
/* GTP tunnel handler: skips the GTP header (v0 or v1) and re-dissects
   the encapsulated packet by re-entering ip_handler()/ip6_handler().
   Since optional GTP header extensions are not parsed, the inner IP
   start is found heuristically: the candidate first byte is slid
   forward one byte per trial, up to MAX_GTP_TRIALS, until a dissection
   succeeds. Returns the inner dissector's verdict, or FALSE on
   unsupported GTP version / truncated capture. */
int gtp_tunnel_func(register struct packet_ptrs *pptrs)
{
  register u_int16_t caplen = ((struct pcap_pkthdr *)pptrs->pkthdr)->caplen;
  struct pm_gtphdr_v0 *gtp_hdr_v0 = (struct pm_gtphdr_v0 *) pptrs->payload_ptr;
  u_int16_t off = pptrs->payload_ptr-pptrs->packet_ptr;
  u_int16_t gtp_hdr_len, gtp_version;
  u_char *ptr = pptrs->payload_ptr;
  int ret, trial;

  /* GTP version lives in the top 3 bits of the first flags byte */
  gtp_version = (gtp_hdr_v0->flags >> 5) & 0x07;

  switch (gtp_version) {
  case 0:
    gtp_hdr_len = 4; /* assumes the short GTPv0 mandatory part — TODO confirm vs 20-byte GTPv0 header */
    break;
  case 1:
    gtp_hdr_len = 8; /* GTPv1 mandatory header */
    break;
  default:
    Log(LOG_INFO, "INFO ( %s/core ): unsupported GTP version %u\n", config.name, gtp_version);
    return FALSE;
  }

  if (off + gtp_hdr_len < caplen) {
    off += gtp_hdr_len;
    ptr += gtp_hdr_len;
    ret = 0; trial = 0;

    /* slide the candidate inner-IP start byte-by-byte until a dissection
       succeeds or we give up; ret stays FALSE if all trials fail */
    while (!ret && trial < MAX_GTP_TRIALS) {
      /* reset the L3+/L4 state left over from the outer dissection */
      pptrs->iph_ptr = ptr;
      pptrs->tlh_ptr = NULL; pptrs->payload_ptr = NULL;
      pptrs->l4_proto = 0; pptrs->tcp_flags = 0;

      /* same trick used for MPLS BoS in ll.c: let's look at the first
	 payload byte to guess which protocol we are speaking about */
      switch (*pptrs->iph_ptr) {
      /* 0x45..0x4f: IPv4, version 4, IHL 5..15 */
      case 0x45:
      case 0x46:
      case 0x47:
      case 0x48:
      case 0x49:
      case 0x4a:
      case 0x4b:
      case 0x4c:
      case 0x4d:
      case 0x4e:
      case 0x4f:
	pptrs->tun_layer++;
	ret = ip_handler(pptrs);
	break;
      /* 0x60..0x6f: IPv6, version 6, any traffic class high nibble */
      case 0x60:
      case 0x61:
      case 0x62:
      case 0x63:
      case 0x64:
      case 0x65:
      case 0x66:
      case 0x67:
      case 0x68:
      case 0x69:
      case 0x6a:
      case 0x6b:
      case 0x6c:
      case 0x6d:
      case 0x6e:
      case 0x6f:
	pptrs->tun_layer++;
	ret = ip6_handler(pptrs);
	break;
      default:
	ret = FALSE;
	break;
      }

      /* next loop increment */
      off++; ptr++; trial++;
    }
  }
  else {
    Log(LOG_INFO, "INFO ( %s/core ): short GTP packet read (%u/%u/tunnel). Snaplen issue ?\n",
	config.name, caplen, off + gtp_hdr_len);
    return FALSE;
  }

  return ret;
}
717
reset_index_pkt_ptrs(struct packet_ptrs * pptrs)718 void reset_index_pkt_ptrs(struct packet_ptrs *pptrs)
719 {
720 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_PACKET_PTR] = NULL;
721 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_MAC_PTR] = NULL;
722 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_VLAN_PTR] = NULL;
723 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_MPLS_PTR] = NULL;
724 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_L3_PTR] = NULL;
725 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_L4_PTR] = NULL;
726 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_PAYLOAD_PTR] = NULL;
727
728 pptrs->pkt_proto[CUSTOM_PRIMITIVE_L3_PTR] = FALSE;
729 pptrs->pkt_proto[CUSTOM_PRIMITIVE_L4_PTR] = FALSE;
730 }
731
set_index_pkt_ptrs(struct packet_ptrs * pptrs)732 void set_index_pkt_ptrs(struct packet_ptrs *pptrs)
733 {
734 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_PACKET_PTR] = pptrs->packet_ptr;
735 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_MAC_PTR] = pptrs->mac_ptr;
736 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_VLAN_PTR] = pptrs->vlan_ptr;
737 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_MPLS_PTR] = pptrs->mpls_ptr;
738 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_L3_PTR] = pptrs->iph_ptr;
739 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_L4_PTR] = pptrs->tlh_ptr;
740 pptrs->pkt_data_ptrs[CUSTOM_PRIMITIVE_PAYLOAD_PTR] = pptrs->payload_ptr;
741
742 pptrs->pkt_proto[CUSTOM_PRIMITIVE_L3_PTR] = pptrs->l3_proto;
743 pptrs->pkt_proto[CUSTOM_PRIMITIVE_L4_PTR] = pptrs->l4_proto;
744 }
745
/* Emulates a recvfrom() on top of a pcap savefile: reads the next packet
   via pcap_next_ex(), dissects it, and returns the payload length with
   (*buf) pointing at the payload, (*ts) at the capture timestamp and
   src_addr filled from the packet's source IP/port (UDP or TCP only).
   Handles end-of-file by replaying the savefile up to
   config.pcap_sf_replay times (forever if negative), then draining and
   shutting down. Returns 0 on a read error that is being skipped. */
ssize_t recvfrom_savefile(struct pm_pcap_device *device, void **buf, struct sockaddr *src_addr, struct timeval **ts, int *round, struct packet_ptrs *savefile_pptrs)
{
  ssize_t ret = 0;
  int pm_pcap_ret;

  read_packet:
  pm_pcap_ret = pcap_next_ex(device->dev_desc, &savefile_pptrs->pkthdr, (const u_char **)&savefile_pptrs->packet_ptr);

  if (pm_pcap_ret == 1 /* all good */) device->errors = FALSE;
  else if (pm_pcap_ret == -1 /* failed reading next packet */) {
    /* consecutive errors are tolerated up to a threshold, then fatal */
    device->errors++;
    if (device->errors == PCAP_SAVEFILE_MAX_ERRORS) {
      Log(LOG_ERR, "ERROR ( %s/core ): pcap_ext_ex() max errors reached (%u). Exiting.\n", config.name, PCAP_SAVEFILE_MAX_ERRORS);
      exit_gracefully(1);
    }
    else {
      Log(LOG_WARNING, "WARN ( %s/core ): pcap_ext_ex() failed: %s. Skipping packet.\n", config.name, pcap_geterr(device->dev_desc));
      return 0;
    }
  }
  else if (pm_pcap_ret == -2 /* last packet in a pcap_savefile */) {
    pcap_close(device->dev_desc);

    /* replay: negative means loop forever, positive bounds the rounds */
    if (config.pcap_sf_replay < 0 ||
	(config.pcap_sf_replay > 0 && (*round) < config.pcap_sf_replay)) {
      (*round)++;
      open_pcap_savefile(device, config.pcap_savefile);
      if (config.pcap_sf_delay) sleep(config.pcap_sf_delay);

      goto read_packet;
    }

    /* optionally let plugins drain their pipes before exiting */
    if (config.pcap_sf_wait) {
      fill_pipe_buffer();
      Log(LOG_INFO, "INFO ( %s/core ): finished reading PCAP capture file\n", config.name);
      wait(NULL);
    }

    stop_all_childs();
  }
  else {
    Log(LOG_ERR, "ERROR ( %s/core ): unexpected return code from pcap_next_ex(). Exiting.\n", config.name);
    exit_gracefully(1);
  }

  /* link-layer then network-layer dissection of the packet just read */
  (*device->data->handler)(savefile_pptrs->pkthdr, savefile_pptrs);
  if (savefile_pptrs->iph_ptr) {
    (*savefile_pptrs->l3_handler)(savefile_pptrs);
    if (savefile_pptrs->payload_ptr) {
      if (ts) (*ts) = &savefile_pptrs->pkthdr->ts;
      (*buf) = savefile_pptrs->payload_ptr;
      ret = savefile_pptrs->pkthdr->caplen - (savefile_pptrs->payload_ptr - savefile_pptrs->packet_ptr);

      /* synthesize the source sockaddr from the packet's IP and port */
      if (savefile_pptrs->l4_proto == IPPROTO_UDP || savefile_pptrs->l4_proto == IPPROTO_TCP) {
	if (savefile_pptrs->l3_proto == ETHERTYPE_IP) {
	  raw_to_sa((struct sockaddr *)src_addr, (u_char *) &((struct pm_iphdr *)savefile_pptrs->iph_ptr)->ip_src.s_addr,
		    (u_int16_t) ((struct pm_udphdr *)savefile_pptrs->tlh_ptr)->uh_sport, AF_INET);
	}
	else if (savefile_pptrs->l3_proto == ETHERTYPE_IPV6) {
	  raw_to_sa((struct sockaddr *)src_addr, (u_char *) &((struct ip6_hdr *)savefile_pptrs->iph_ptr)->ip6_src,
		    (u_int16_t) ((struct pm_udphdr *)savefile_pptrs->tlh_ptr)->uh_sport, AF_INET6);
	}
      }
    }
  }

  return ret;
}
814
recvfrom_rawip(unsigned char * buf,size_t len,struct sockaddr * src_addr,struct packet_ptrs * local_pptrs)815 ssize_t recvfrom_rawip(unsigned char *buf, size_t len, struct sockaddr *src_addr, struct packet_ptrs *local_pptrs)
816 {
817 ssize_t ret = 0;
818
819 local_pptrs->packet_ptr = buf;
820 local_pptrs->pkthdr->caplen = len;
821
822 raw_handler(local_pptrs->pkthdr, local_pptrs);
823
824 if (local_pptrs->iph_ptr) {
825 (*local_pptrs->l3_handler)(local_pptrs);
826 if (local_pptrs->payload_ptr) {
827 ret = local_pptrs->pkthdr->caplen - (local_pptrs->payload_ptr - local_pptrs->packet_ptr);
828
829 if (local_pptrs->l4_proto == IPPROTO_UDP) {
830 if (local_pptrs->l3_proto == ETHERTYPE_IP) {
831 raw_to_sa((struct sockaddr *)src_addr, (u_char *) &((struct pm_iphdr *)local_pptrs->iph_ptr)->ip_src.s_addr,
832 (u_int16_t) ((struct pm_udphdr *)local_pptrs->tlh_ptr)->uh_sport, AF_INET);
833 }
834 else if (local_pptrs->l3_proto == ETHERTYPE_IPV6) {
835 raw_to_sa((struct sockaddr *)src_addr, (u_char *) &((struct ip6_hdr *)local_pptrs->iph_ptr)->ip6_src,
836 (u_int16_t) ((struct pm_udphdr *)local_pptrs->tlh_ptr)->uh_sport, AF_INET6);
837 }
838 }
839
840 /* last action: cut L3 and L4 off the packet */
841 memmove(buf, local_pptrs->payload_ptr, ret);
842 }
843 }
844
845 return ret;
846 }
847
pm_pcap_add_filter(struct pm_pcap_device * dev_ptr)848 void pm_pcap_add_filter(struct pm_pcap_device *dev_ptr)
849 {
850 /* pcap library stuff */
851 struct bpf_program filter;
852
853 memset(&filter, 0, sizeof(filter));
854 if (pcap_compile(dev_ptr->dev_desc, &filter, config.clbuf, 0, PCAP_NETMASK_UNKNOWN) < 0) {
855 Log(LOG_WARNING, "WARN ( %s/core ): %s (going on without a filter)\n", config.name, pcap_geterr(dev_ptr->dev_desc));
856 }
857 else {
858 if (pcap_setfilter(dev_ptr->dev_desc, &filter) < 0) {
859 Log(LOG_WARNING, "WARN ( %s/core ): %s (going on without a filter)\n", config.name, pcap_geterr(dev_ptr->dev_desc));
860 }
861 else pcap_freecode(&filter);
862 }
863 }
864