/* Copyright (C) 2012-2020 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 * \file
 *
 * \author nPulse Technologies, LLC.
 * \author Matt Keeler <mk@npulsetech.com>
 *
 * Support for NAPATECH adapter with the 3GD Driver/API.
 * Requires libntapi from Napatech A/S.
 *
 */
#include "suricata-common.h"
#include "suricata.h"
#include "threadvars.h"
#include "util-optimize.h"
#include "tm-queuehandlers.h"
#include "tm-threads.h"
#include "tm-modules.h"
#include "util-privs.h"
#include "tmqh-packetpool.h"
#include "util-napatech.h"
#include "source-napatech.h"

#ifndef HAVE_NAPATECH

TmEcode NoNapatechSupportExit(ThreadVars*, const void*, void**);

void TmModuleNapatechStreamRegister(void)
{
    tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
    tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NoNapatechSupportExit;
    tmm_modules[TMM_RECEIVENAPATECH].Func = NULL;
    tmm_modules[TMM_RECEIVENAPATECH].ThreadExitPrintStats = NULL;
    tmm_modules[TMM_RECEIVENAPATECH].ThreadDeinit = NULL;
    tmm_modules[TMM_RECEIVENAPATECH].cap_flags = SC_CAP_NET_ADMIN;
}

void TmModuleNapatechDecodeRegister(void)
{
    tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
    tmm_modules[TMM_DECODENAPATECH].ThreadInit = NoNapatechSupportExit;
    tmm_modules[TMM_DECODENAPATECH].Func = NULL;
    tmm_modules[TMM_DECODENAPATECH].ThreadExitPrintStats = NULL;
    tmm_modules[TMM_DECODENAPATECH].ThreadDeinit = NULL;
    tmm_modules[TMM_DECODENAPATECH].cap_flags = 0;
    tmm_modules[TMM_DECODENAPATECH].flags = TM_FLAG_DECODE_TM;
}

TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data)
{
    SCLogError(SC_ERR_NAPATECH_NOSUPPORT,
            "Error creating thread %s: you do not have support for Napatech adapter "
            "enabled; please recompile with --enable-napatech",
            tv->name);
    exit(EXIT_FAILURE);
}

#else /* Implied we do have NAPATECH support */

#include <numa.h>
#include <nt.h>

extern int max_pending_packets;

typedef struct NapatechThreadVars_
{
    ThreadVars *tv;
    NtNetStreamRx_t rx_stream;
    uint16_t stream_id;
    int hba;
    TmSlot *slot;
} NapatechThreadVars;

#ifdef NAPATECH_ENABLE_BYPASS
static int NapatechBypassCallback(Packet *p);
#endif

TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **);
void NapatechStreamThreadExitStats(ThreadVars *, void *);
TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot);

TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **);
TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data);
TmEcode NapatechDecode(ThreadVars *, Packet *, void *);

/* These are used as the threads are exiting to get a comprehensive count of
 * all the packets received and dropped.
 */
SC_ATOMIC_DECLARE(uint64_t, total_packets);
SC_ATOMIC_DECLARE(uint64_t, total_drops);
SC_ATOMIC_DECLARE(uint16_t, total_tallied);

/* Streams are counted as they are instantiated in order to know when all
 * threads are running. */
SC_ATOMIC_DECLARE(uint16_t, stream_count);

SC_ATOMIC_DECLARE(uint16_t, numa0_count);
SC_ATOMIC_DECLARE(uint16_t, numa1_count);
SC_ATOMIC_DECLARE(uint16_t, numa2_count);
SC_ATOMIC_DECLARE(uint16_t, numa3_count);

SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts);
SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts);

/**
 * \brief Register the Napatech receiver (reader) module.
 */
void TmModuleNapatechStreamRegister(void)
{
    tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
    tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NapatechStreamThreadInit;
    tmm_modules[TMM_RECEIVENAPATECH].Func = NULL;
    tmm_modules[TMM_RECEIVENAPATECH].PktAcqLoop = NapatechPacketLoop;
    tmm_modules[TMM_RECEIVENAPATECH].PktAcqBreakLoop = NULL;
    tmm_modules[TMM_RECEIVENAPATECH].ThreadExitPrintStats = NapatechStreamThreadExitStats;
    tmm_modules[TMM_RECEIVENAPATECH].ThreadDeinit = NapatechStreamThreadDeinit;
    tmm_modules[TMM_RECEIVENAPATECH].cap_flags = SC_CAP_NET_RAW;
    tmm_modules[TMM_RECEIVENAPATECH].flags = TM_FLAG_RECEIVE_TM;

    SC_ATOMIC_INIT(total_packets);
    SC_ATOMIC_INIT(total_drops);
    SC_ATOMIC_INIT(total_tallied);
    SC_ATOMIC_INIT(stream_count);

    SC_ATOMIC_INIT(numa0_count);
    SC_ATOMIC_INIT(numa1_count);
    SC_ATOMIC_INIT(numa2_count);
    SC_ATOMIC_INIT(numa3_count);

    SC_ATOMIC_INIT(flow_callback_cnt);
    SC_ATOMIC_INIT(flow_callback_handled_pkts);
    SC_ATOMIC_INIT(flow_callback_udp_pkts);
    SC_ATOMIC_INIT(flow_callback_tcp_pkts);
    SC_ATOMIC_INIT(flow_callback_unhandled_pkts);
}

/**
 * \brief Register the Napatech decoder module.
 */
void TmModuleNapatechDecodeRegister(void)
{
    tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
    tmm_modules[TMM_DECODENAPATECH].ThreadInit = NapatechDecodeThreadInit;
    tmm_modules[TMM_DECODENAPATECH].Func = NapatechDecode;
    tmm_modules[TMM_DECODENAPATECH].ThreadExitPrintStats = NULL;
    tmm_modules[TMM_DECODENAPATECH].ThreadDeinit = NapatechDecodeThreadDeinit;
    tmm_modules[TMM_DECODENAPATECH].cap_flags = 0;
    tmm_modules[TMM_DECODENAPATECH].flags = TM_FLAG_DECODE_TM;
}

#ifdef NAPATECH_ENABLE_BYPASS
/**
 * \brief template of IPv4 header
 */
struct ipv4_hdr
{
    uint8_t version_ihl; /**< version and header length */
    uint8_t type_of_service; /**< type of service */
    uint16_t total_length; /**< length of packet */
    uint16_t packet_id; /**< packet ID */
    uint16_t fragment_offset; /**< fragmentation offset */
    uint8_t time_to_live; /**< time to live */
    uint8_t next_proto_id; /**< protocol ID */
    uint16_t hdr_checksum; /**< header checksum */
    uint32_t src_addr; /**< source address */
    uint32_t dst_addr; /**< destination address */
} __attribute__ ((__packed__));

/**
 * \brief template of IPv6 header
 */
struct ipv6_hdr
{
    uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
    uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
    uint8_t proto; /**< Protocol, next header. */
    uint8_t hop_limits; /**< Hop limits. */
    uint8_t src_addr[16]; /**< IP address of source host. */
    uint8_t dst_addr[16]; /**< IP address of destination host(s). */
} __attribute__ ((__packed__));

/**
 * \brief template of UDP header
 */
struct udp_hdr
{
    uint16_t src_port; /**< UDP source port. */
    uint16_t dst_port; /**< UDP destination port. */
    uint16_t dgram_len; /**< UDP datagram length */
    uint16_t dgram_cksum; /**< UDP datagram checksum */
} __attribute__ ((__packed__));

/**
 * \brief template of TCP header
 */
struct tcp_hdr
{
    uint16_t src_port; /**< TCP source port. */
    uint16_t dst_port; /**< TCP destination port. */
    uint32_t sent_seq; /**< TX data sequence number. */
    uint32_t recv_ack; /**< RX data acknowledgement sequence number. */
    uint8_t data_off; /**< Data offset. */
    uint8_t tcp_flags; /**< TCP flags */
    uint16_t rx_win; /**< RX flow control window. */
    uint16_t cksum; /**< TCP checksum. */
    uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
} __attribute__ ((__packed__));

/* The hardware will assign a "color" value indicating what filters are matched
 * by a given packet.  These constants indicate what bits are set in the color
 * field for different protocols.
 */
#define RTE_PTYPE_L2_ETHER                  0x10000000
#define RTE_PTYPE_L3_IPV4                   0x01000000
#define RTE_PTYPE_L3_IPV6                   0x04000000
#define RTE_PTYPE_L4_TCP                    0x00100000
#define RTE_PTYPE_L4_UDP                    0x00200000

/* These masks are used to extract layer 3 and layer 4 protocol
 * values from the color field in the packet descriptor.
 */
#define RTE_PTYPE_L3_MASK                   0x0f000000
#define RTE_PTYPE_L4_MASK                   0x00f00000

#define COLOR_IS_SPAN                       0x00001000
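
/*
 * Illustrative sketch (not used elsewhere in this file) of how the color
 * bits above combine.  An IPv4/TCP packet seen on a SPAN port would carry,
 * in the color field of its descriptor:
 *
 *     RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP | COLOR_IS_SPAN
 *
 * and the layer tests used by ProgramFlow() below reduce to:
 *
 *     (color & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4
 *     (color & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP
 *     (color & COLOR_IS_SPAN) != 0
 */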
static int is_inline = 0;

/* inline_port_map is fully initialized to -1 in NapatechPacketLoop() before
 * any pairings are configured; "{ -1 }" below only sets element 0. */
static int inline_port_map[MAX_PORTS] = { -1 };

/**
 * \brief Binds two ports together for inline operation.
 *
 * Records that the two ports form an inline pair, so that packets
 * received on one port are transmitted out of the other.
 *
 * \param port one of the ports in a pairing.
 * \param peer the other port in a pairing.
 * \return 1 if the pairing was recorded, 0 if a pairing was already configured.
 *
 */
int NapatechSetPortmap(int port, int peer)
{
    if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) {
        inline_port_map[port] = peer;
        inline_port_map[peer] = port;
    } else {
        SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
                "Port pairing is already configured.");
        return 0;
    }
    return 1;
}
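
/*
 * Usage sketch (hypothetical port numbers): pairing ports 0 and 1 for
 * inline operation, so traffic received on one port is transmitted on
 * the other.  Assumes the port map has already been initialized to -1,
 * as is done at startup in NapatechPacketLoop().
 *
 *     if (!NapatechSetPortmap(0, 1)) {
 *         // pairing already configured - handle the configuration error
 *     }
 */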

/**
 * \brief Returns the ID of the adapter
 *
 * Get the ID of an adapter on which a given port resides.
 *
 * \param port for which adapter ID is requested.
 * \return ID of the adapter.
 *
 */
int NapatechGetAdapter(uint8_t port)
{
    /* Cache of port-to-adapter mappings; -1 marks an unresolved entry.
     * "= { -1 }" would only set the first element, so the whole array
     * is filled explicitly on first use. */
    static int port_adapter_map[MAX_PORTS];
    static int port_adapter_map_init = 0;
    int status;
    NtInfo_t h_info; /* Info handle */
    NtInfoStream_t h_info_stream; /* Info stream handle */

    if (unlikely(!port_adapter_map_init)) {
        for (int i = 0; i < MAX_PORTS; ++i) {
            port_adapter_map[i] = -1;
        }
        port_adapter_map_init = 1;
    }

    if (unlikely(port_adapter_map[port] == -1)) {
        if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) {
            NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
            return -1;
        }
        /* Read the system info */
        h_info.cmd = NT_INFO_CMD_READ_PORT_V9;
        h_info.u.port_v9.portNo = (uint8_t) port;
        if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) {
            /* Get the status code as text */
            NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
            NT_InfoClose(h_info_stream);
            return -1;
        }
        port_adapter_map[port] = h_info.u.port_v9.data.adapterNo;
        NT_InfoClose(h_info_stream);
    }
    return port_adapter_map[port];
}

/**
 * \brief IPv4 4-tuple convenience structure
 */
struct IPv4Tuple4
{
    uint32_t sa; /*!< Source address */
    uint32_t da; /*!< Destination address */
    uint16_t sp; /*!< Source port */
    uint16_t dp; /*!< Destination port */
};

/**
 * \brief IPv6 4-tuple convenience structure
 */
struct IPv6Tuple4
{
    uint8_t sa[16]; /*!< Source address */
    uint8_t da[16]; /*!< Destination address */
    uint16_t sp;    /*!< Source port */
    uint16_t dp;    /*!< Destination port */
};

/**
 * \brief Compares the byte order value of two IPv6 addresses.
 *
 * \param addr_a The first address to compare
 * \param addr_b The second address to compare
 *
 * \return -1 if addr_a < addr_b
 *          1 if addr_a > addr_b
 *          0 if addr_a == addr_b
 */
static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) {
    uint16_t pos;
    for (pos = 0; pos < 16; ++pos) {
        if (addr_a[pos] < addr_b[pos]) {
            return -1;
        } else if (addr_a[pos] > addr_b[pos]) {
            return 1;
        } /* else they are equal - check next position*/
    }

    /* if we get here the addresses are equal */
    return 0;
}

/**
 * \brief  Initializes the FlowStreams used to program flow data.
 *
 * Opens a FlowStream on the adapter associated with the rx port.  This
 * FlowStream is subsequently used to program the adapter with
 * flows to bypass.
 *
 * \return the flow stream handle, NULL if failure.
 */
static NtFlowStream_t InitFlowStream(int adapter, int stream_id)
{
    int status;
    NtFlowStream_t hFlowStream;

    NtFlowAttr_t attr;
    char flow_name[80];

    NT_FlowOpenAttrInit(&attr);
    NT_FlowOpenAttrSetAdapterNo(&attr, adapter);

    snprintf(flow_name, sizeof(flow_name), "Flow_stream_%d", stream_id);
    SCLogDebug("Opening flow programming stream:  %s", flow_name);
    if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
        SCLogWarning(SC_WARN_COMPATIBILITY,
                "Napatech bypass functionality not supported by the FPGA version on adapter %d - disabling support.",
                adapter);
        return NULL;
    }
    return hFlowStream;
}
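
/*
 * Sketch of how the returned handle is used (ProgramFlow() below is the
 * real call site; the field values here are placeholders, and the -1
 * timeout mirrors the NT_FlowWrite() call made there):
 *
 *     NtFlowStream_t hFlow = InitFlowStream(adapter, stream_id);
 *     if (hFlow != NULL) {
 *         NtFlow_t flow;
 *         memset(&flow, 0, sizeof(flow));
 *         // ... fill in keyId, keyData, keySetId, op, etc. ...
 *         NT_FlowWrite(hFlow, &flow, -1);
 *     }
 */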

/**
 * \brief Programs a flow on the Napatech adapter for hardware bypass.
 *
 * Sets up the flow tables on the Napatech card so that subsequent packets
 * from this flow are bypassed in hardware.  Called from
 * NapatechReleasePacket() once the verdict for the packet is known.
 *
 * \param p packet containing information about the flow to be bypassed
 * \param is_inline indicates if Suricata is being run in inline mode.
 *
 * \return 1 on success, 0 on failure.
 *
 */
static int ProgramFlow(Packet *p, int is_inline)
{
    NtFlow_t flow_match;
    memset(&flow_match, 0, sizeof(flow_match));

    NapatechPacketVars *ntpv = &(p->ntpv);

    /*
     * The hardware decoder will "color" the packets according to the protocols
     * in the packet and the port the packet arrived on.  packet_type gets
     * these bits and we mask out layer3, layer4, and is_span to determine
     * the protocols and if the packet is coming in from a SPAN port.
     */
    uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo;
    uint8_t *packet = (uint8_t *) ntpv->dyn3 + ntpv->dyn3->descrLength;

    uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
    uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
    uint32_t is_span = packet_type & COLOR_IS_SPAN;

    /*
     * When we're programming the flows to arrive on a span port,
     * where upstream and downstream packets arrive on the same port,
     * the hardware is configured to swap the source and dest
     * fields if the src addr > dest addr.  We need to program the
     * flow tables to match.  We'll compare addresses and set
     * do_swap accordingly.
     */

    uint32_t do_swap = 0;

    SC_ATOMIC_ADD(flow_callback_cnt, 1);

    /* Only bypass TCP and UDP */
    if (PKT_IS_TCP(p)) {
        SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1);
    } else if (PKT_IS_UDP(p)) {
        SC_ATOMIC_ADD(flow_callback_udp_pkts, 1);
    } else {
        SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1);
    }

    struct IPv4Tuple4 v4Tuple;
    struct IPv6Tuple4 v6Tuple;
    struct ipv4_hdr *pIPv4_hdr = NULL;
    struct ipv6_hdr *pIPv6_hdr = NULL;

    switch (layer3) {
        case RTE_PTYPE_L3_IPV4:
        {
            pIPv4_hdr = (struct ipv4_hdr *) (packet + ntpv->dyn3->offset0);
            if (!is_span) {
                v4Tuple.sa = pIPv4_hdr->src_addr;
                v4Tuple.da = pIPv4_hdr->dst_addr;
            } else {
                do_swap = (htonl(pIPv4_hdr->src_addr) > htonl(pIPv4_hdr->dst_addr));
                if (!do_swap) {
                    /* already in order */
                    v4Tuple.sa = pIPv4_hdr->src_addr;
                    v4Tuple.da = pIPv4_hdr->dst_addr;
                } else { /* swap */
                    v4Tuple.sa = pIPv4_hdr->dst_addr;
                    v4Tuple.da = pIPv4_hdr->src_addr;
                }
            }
            break;
        }
        case RTE_PTYPE_L3_IPV6:
        {
            pIPv6_hdr = (struct ipv6_hdr *) (packet + ntpv->dyn3->offset0);
            do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);

            if (!is_span) {
                memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
                memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
            } else {
                /* sort src/dest address before programming */
                if (!do_swap) {
                    /* already in order */
                    memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
                    memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
                } else { /* swap the addresses */
                    memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16);
                    memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16);
                }
            }
            break;
        }
        default:
        {
            return 0;
        }
    }

    switch (layer4) {
        case RTE_PTYPE_L4_TCP:
        {
            struct tcp_hdr *tcp_hdr = (struct tcp_hdr *) (packet + ntpv->dyn3->offset1);
            if (layer3 == RTE_PTYPE_L3_IPV4) {
                if (!is_span) {
                    v4Tuple.dp = tcp_hdr->dst_port;
                    v4Tuple.sp = tcp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
                } else {
                    if (!do_swap) {
                        v4Tuple.sp = tcp_hdr->src_port;
                        v4Tuple.dp = tcp_hdr->dst_port;
                    } else {
                        v4Tuple.sp = tcp_hdr->dst_port;
                        v4Tuple.dp = tcp_hdr->src_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
                }
                memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
            } else {
                if (!is_span) {
                    v6Tuple.dp = tcp_hdr->dst_port;
                    v6Tuple.sp = tcp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
                } else {
                    if (!do_swap) {
                        v6Tuple.sp = tcp_hdr->src_port;
                        v6Tuple.dp = tcp_hdr->dst_port;
                    } else {
                        v6Tuple.dp = tcp_hdr->src_port;
                        v6Tuple.sp = tcp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
                }
                memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
            }
            flow_match.ipProtocolField = 6;
            break;
        }
        case RTE_PTYPE_L4_UDP:
        {
            struct udp_hdr *udp_hdr = (struct udp_hdr *) (packet + ntpv->dyn3->offset1);
            if (layer3 == RTE_PTYPE_L3_IPV4) {
                if (!is_span) {
                    v4Tuple.dp = udp_hdr->dst_port;
                    v4Tuple.sp = udp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
                } else {
                    if (!do_swap) {
                        v4Tuple.sp = udp_hdr->src_port;
                        v4Tuple.dp = udp_hdr->dst_port;
                    } else {
                        v4Tuple.dp = udp_hdr->src_port;
                        v4Tuple.sp = udp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
                }
                memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
            } else { /* layer3 is IPV6 */
                if (!is_span) {
                    v6Tuple.dp = udp_hdr->dst_port;
                    v6Tuple.sp = udp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
                } else {
                    if (!do_swap) {
                        v6Tuple.sp = udp_hdr->src_port;
                        v6Tuple.dp = udp_hdr->dst_port;
                    } else {
                        v6Tuple.dp = udp_hdr->src_port;
                        v6Tuple.sp = udp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
                }
                memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
            }
            flow_match.ipProtocolField = 17;
            break;
        }
        default:
        {
            return 0;
        }
    }

    flow_match.op = 1;  /* program flow */
    flow_match.gfi = 1; /* Generate FlowInfo records */
    flow_match.tau = 1; /* tcp automatic unlearn */

    if (PACKET_TEST_ACTION(p, ACTION_DROP)) {
        flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
    } else {
        if (is_inline) {
            flow_match.keySetId = NAPATECH_FLOWTYPE_PASS;
        } else {
            flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
        }
    }

    if (NT_FlowWrite(ntpv->flow_stream, &flow_match, -1) != NT_SUCCESS) {
        if (!(suricata_ctl_flags & SURICATA_STOP)) {
            SCLogError(SC_ERR_NAPATECH_OPEN_FAILED, "NT_FlowWrite failed.");
            exit(EXIT_FAILURE);
        }
    }

    return 1;
}

/**
 * \brief     Callback from Suricata when a flow that should be bypassed
 *            is identified.
 */

static int NapatechBypassCallback(Packet *p)
{
    NapatechPacketVars *ntpv = &(p->ntpv);

    /*
     *  Since, at this point, we don't know what action to take,
     *  simply mark this packet as one that should be
     *  bypassed when the packet is returned by suricata with a
     *  pass/drop verdict.
     */
    ntpv->bypass = 1;

    return 1;
}

#endif

/**
 * \brief   Initialize the Napatech receiver thread; generate a single
 *          NapatechThreadVars structure for each thread.  This will
 *          contain a NtNetStreamRx_t stream handle which is used when the
 *          thread executes to acquire the packets.
 *
 * \param tv        Thread variable to ThreadVars
 * \param initdata  Initial data to the adapter passed from the user,
 *                  this is processed by the user.
 *
 *                  For now, we assume that we have only a single name for the NAPATECH
 *                  adapter.
 *
 * \param data      data pointer that gets populated with a pointer to the
 *                  per-thread NapatechThreadVars structure.
 *
 */
TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **data)
{
    SCEnter();
    struct NapatechStreamDevConf *conf = (struct NapatechStreamDevConf *) initdata;
    uint16_t stream_id = conf->stream_id;
    *data = NULL;

    NapatechThreadVars *ntv = SCCalloc(1, sizeof (NapatechThreadVars));
    if (unlikely(ntv == NULL)) {
        FatalError(SC_ERR_FATAL,
                   "Failed to allocate memory for NAPATECH thread vars.");
    }

    /* SCCalloc returns zeroed memory, so no additional memset is needed. */
    ntv->stream_id = stream_id;
    ntv->tv = tv;
    ntv->hba = conf->hba;
    SCLogDebug("Started processing packets from NAPATECH Stream: %u", ntv->stream_id);

    *data = (void *) ntv;
    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief Callback to indicate that the packet buffer can be returned to the hardware.
 *
 * Called when Suricata is done processing the packet.  Before the Napatech
 * buffer is released back to NTService, this checks the action: in inline
 * mode the wire length of dropped packets is zeroed, and flows marked for
 * bypass are programmed into the flow hardware.
 *
 * \param p Packet to return to the system.
 *
 */
static void NapatechReleasePacket(struct Packet_ *p)
{
    /*
     * If the packet is to be dropped we need to set the wirelength
     * before releasing the Napatech buffer back to NTService.
     */
#ifdef NAPATECH_ENABLE_BYPASS
    if (is_inline && PACKET_TEST_ACTION(p, ACTION_DROP)) {
        p->ntpv.dyn3->wireLength = 0;
    }

    /*
     *  If this flow is to be programmed for hardware bypass we do it now.  This is done
     *  here because the action is not available in the packet structure at the time of the
     *  bypass callback and it needs to be done before we release the packet structure.
     */
    if (p->ntpv.bypass == 1) {
        ProgramFlow(p, is_inline);
    }
#endif

    NT_NetRxRelease(p->ntpv.rx_stream, p->ntpv.nt_packet_buf);
    PacketFreeOrRelease(p);
}

/**
 * \brief Returns the NUMA node associated with the currently running thread.
 *
 * \return ID of the NUMA node.
 *
 */
static int GetNumaNode(void)
{
    int cpu = 0;
    int node = 0;

#if defined(__linux__)
    cpu = sched_getcpu();
    node = numa_node_of_cpu(cpu);
#else
    SCLogWarning(SC_ERR_NAPATECH_NOSUPPORT,
            "Auto configuration of NUMA node is not supported on this OS.");
#endif

    return node;
}

/**
 * \brief Outputs hints on the optimal host-buffer configuration to aid tuning.
 *
 * \param log_level of the currently running instance.
 *
 */
static void RecommendNUMAConfig(SCLogLevel log_level)
{
    char string0[16];
    char string1[16];
    char string2[16];
    char string3[16];
    int set_cpu_affinity = 0;

    if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
        set_cpu_affinity = 0;
    }

    if (set_cpu_affinity) {
        SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
                "Minimum host buffers that should be defined in ntservice.ini:");

        SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, "   NUMA Node 0: %d",
                (SC_ATOMIC_GET(numa0_count)));

        if (numa_max_node() >= 1)
            SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
                    "   NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count)));

        if (numa_max_node() >= 2)
            SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
                    "   NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count)));

        if (numa_max_node() >= 3)
            SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
                    "   NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count)));

        snprintf(string0, 16, "[%d, 16, 0]", SC_ATOMIC_GET(numa0_count));
        snprintf(string1, 16, (numa_max_node() >= 1 ? ",[%d, 16, 1]" : ""),
                SC_ATOMIC_GET(numa1_count));
        snprintf(string2, 16, (numa_max_node() >= 2 ? ",[%d, 16, 2]" : ""),
                SC_ATOMIC_GET(numa2_count));
        snprintf(string3, 16, (numa_max_node() >= 3 ? ",[%d, 16, 3]" : ""),
                SC_ATOMIC_GET(numa3_count));

        SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
                "E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2,
                string3);
    } else if (log_level == SC_LOG_ERROR) {
        SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
                "Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream NUMA config.");
    }
}
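
/*
 * Example of the recommendation printed above (hypothetical counts: four
 * streams pinned to each of two NUMA nodes; the 16 and the trailing NUMA
 * node number come straight from the format strings above):
 *
 *     Minimum host buffers that should be defined in ntservice.ini:
 *        NUMA Node 0: 4
 *        NUMA Node 1: 4
 *     E.g.: HostBuffersRx=[4, 16, 0],[4, 16, 1]
 */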

/**
 * \brief   Main Napatech packet processing loop
 *
 * \param tv     Thread variable to ThreadVars
 * \param data   Pointer to NapatechThreadVars with data specific to Napatech
 * \param slot   TMSlot where this instance is running.
 *
 */
TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot)
{
    int32_t status;
    char error_buffer[100];
    uint64_t pkt_ts;
    NtNetBuf_t packet_buffer;
    NapatechThreadVars *ntv = (NapatechThreadVars *) data;
    uint64_t hba_pkt_drops = 0;
    uint64_t hba_byte_drops = 0;
    uint16_t hba_pkt = 0;
    int numa_node = -1;
    int set_cpu_affinity = 0;
    int closer = 0;
    int is_autoconfig = 0;

    /* This just keeps the startup output more orderly. */
    usleep(200000 * ntv->stream_id);

#ifdef NAPATECH_ENABLE_BYPASS
    NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 };

    /* Get a FlowStream handle for each adapter so we can efficiently find the
     * correct handle corresponding to the port on which a packet is received.
     */
    int adapter = 0;
    for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
        flow_stream[adapter] = InitFlowStream(adapter, ntv->stream_id);
    }
#endif

    if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) {
        is_autoconfig = 0;
    }

    if (is_autoconfig) {
        numa_node = GetNumaNode();
        switch (numa_node) {
        case 0:
            SC_ATOMIC_ADD(numa0_count, 1);
            break;
        case 1:
            SC_ATOMIC_ADD(numa1_count, 1);
            break;
        case 2:
            SC_ATOMIC_ADD(numa2_count, 1);
            break;
        case 3:
            SC_ATOMIC_ADD(numa3_count, 1);
            break;
        default:
            break;
        }

        if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
            set_cpu_affinity = 0;
        }

        if (set_cpu_affinity) {
            NapatechSetupNuma(ntv->stream_id, numa_node);
        }

        numa_node = GetNumaNode();
        SC_ATOMIC_ADD(stream_count, 1);
        if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) {

#ifdef NAPATECH_ENABLE_BYPASS
            if (ConfGetBool("napatech.inline", &is_inline) == 0) {
                is_inline = 0;
            }

            /* Initialize the port map before we setup traffic filters */
            for (int i = 0; i < MAX_PORTS; ++i) {
                inline_port_map[i] = -1;
            }
#endif
            /* The last thread to run sets up and deletes the streams */
            status = NapatechSetupTraffic(NapatechGetNumFirstStream(),
                    NapatechGetNumLastStream());

            closer = 1;

            if (status == 0x20002061) {
                SCLogError(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED,
                        "Check host buffer configuration in ntservice.ini.");
                RecommendNUMAConfig(SC_LOG_ERROR);
                exit(EXIT_FAILURE);

            } else if (status == 0x20000008) {
                FatalError(SC_ERR_FATAL,
                        "Check napatech.ports in the suricata config file.");
            }
            RecommendNUMAConfig(SC_LOG_PERF);
            SCLogNotice("Napatech packet input engine started.");
        }
    } // is_autoconfig

    SCLogInfo(
            "Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d   stream: %3u ",
            sched_getcpu(), numa_node, ntv->stream_id);

    if (ntv->hba > 0) {
        char *s_hbad_pkt = SCCalloc(1, 32);
        if (unlikely(s_hbad_pkt == NULL)) {
            FatalError(SC_ERR_FATAL,
                    "Failed to allocate memory for NAPATECH stream counter.");
        }
        snprintf(s_hbad_pkt, 32, "nt%d.hba_drop", ntv->stream_id);
        hba_pkt = StatsRegisterCounter(s_hbad_pkt, tv);
        StatsSetupPrivate(tv);
        StatsSetUI64(tv, hba_pkt, 0);
    }
    SCLogDebug("Opening NAPATECH Stream: %u for processing", ntv->stream_id);

    if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream",
            NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {

        NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
        SCFree(ntv);
        SCReturnInt(TM_ECODE_FAILED);
    }
    TmSlot *s = (TmSlot *) slot;
    ntv->slot = s->slot_next;

    while (!(suricata_ctl_flags & SURICATA_STOP)) {
        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        PacketPoolWait();

        /* Napatech returns packets 1 at a time */
        status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000);
        if (unlikely(
                status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
            if (status == NT_STATUS_TIMEOUT) {
                TmThreadsCaptureHandleTimeout(tv, NULL);
            }
            continue;
        } else if (unlikely(status != NT_SUCCESS)) {
            NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
            /* Fill error_buffer before logging it; it was previously logged
             * uninitialized. */
            NT_ExplainError(status, error_buffer, sizeof(error_buffer));
            SCLogInfo("Failed to read from Napatech Stream %u: %s",
                    ntv->stream_id, error_buffer);
            break;
        }

        Packet *p = PacketGetFromQueueOrAlloc();
        if (unlikely(p == NULL)) {
            NT_NetRxRelease(ntv->rx_stream, packet_buffer);
            SCReturnInt(TM_ECODE_FAILED);
        }

#ifdef NAPATECH_ENABLE_BYPASS
        p->ntpv.bypass = 0;
#endif

        p->ntpv.rx_stream = ntv->rx_stream;

        pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer);

        /*
         * Handle the different timestamp forms that the Napatech cards could use.
         *   - NT_TIMESTAMP_TYPE_NATIVE is not supported due to having a base
         *     of 0 as opposed to NATIVE_UNIX which has a base of 1/1/1970
         */
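        /*
         * Worked example for the NATIVE_UNIX case below (the divisors imply
         * 10 ns ticks): pkt_ts = 157000000042 ticks is 1570.00000042 s, so
         * tv_sec = 157000000042 / 100000000 = 1570 and
         * tv_usec = (42 / 100) + 0 = 0 after rounding.
         */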
        switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) {
            case NT_TIMESTAMP_TYPE_NATIVE_UNIX:
                p->ts.tv_sec = pkt_ts / 100000000;
                p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0);
                break;
            case NT_TIMESTAMP_TYPE_PCAP:
                p->ts.tv_sec = pkt_ts >> 32;
                p->ts.tv_usec = pkt_ts & 0xFFFFFFFF;
                break;
            case NT_TIMESTAMP_TYPE_PCAP_NANOTIME:
                p->ts.tv_sec = pkt_ts >> 32;
                p->ts.tv_usec = ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0);
                break;
            case NT_TIMESTAMP_TYPE_NATIVE_NDIS:
                /* number of seconds between 1/1/1601 and 1/1/1970 */
                p->ts.tv_sec = (pkt_ts / 100000000) - 11644473600;
                p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0);
                break;
            default:
                SCLogError(SC_ERR_NAPATECH_TIMESTAMP_TYPE_NOT_SUPPORTED,
                        "Packet from Napatech Stream: %u does not have a supported timestamp format",
                        ntv->stream_id);
                NT_NetRxRelease(ntv->rx_stream, packet_buffer);
                SCReturnInt(TM_ECODE_FAILED);
        }

        if (unlikely(ntv->hba > 0)) {
            NtNetRx_t stat_cmd;
            stat_cmd.cmd = NT_NETRX_READ_CMD_STREAM_DROP;
            /* Update drop counter */
            if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) {
                NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
                SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u",
                        ntv->stream_id);
            } else {
                hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped;

                StatsSetUI64(tv, hba_pkt, hba_pkt_drops);
            }
            StatsSyncCountersIfSignalled(tv);
        }

#ifdef NAPATECH_ENABLE_BYPASS
        p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer);
        p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL);
        NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]);
        p->ntpv.flow_stream = flow_stream[NapatechGetAdapter(p->ntpv.dyn3->rxPort)];
#endif

        p->ReleasePacket = NapatechReleasePacket;
        p->ntpv.nt_packet_buf = packet_buffer;
        p->ntpv.stream_id = ntv->stream_id;
        p->datalink = LINKTYPE_ETHERNET;

        if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
            TmqhOutputPacketpool(ntv->tv, p);
            SCReturnInt(TM_ECODE_FAILED);
        }

        if (unlikely(TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK)) {
            SCReturnInt(TM_ECODE_FAILED);
        }

        /*
         * At this point the packet and the Napatech Packet Buffer have been returned
         * to the system in the NapatechReleasePacket() Callback.
         */

        StatsSyncCountersIfSignalled(tv);
    } // while

    if (closer) {
        NapatechDeleteFilters();
    }

    if (unlikely(ntv->hba > 0)) {
        SCLogInfo("Host Buffer Allowance Drops - pkts: %ld,  bytes: %ld", hba_pkt_drops, hba_byte_drops);
    }

    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief Print some stats to the log at program exit.
 *
 * \param tv Pointer to ThreadVars.
 * \param data Pointer to data, NapatechThreadVars.
 */
void NapatechStreamThreadExitStats(ThreadVars *tv, void *data)
{
    NapatechThreadVars *ntv = (NapatechThreadVars *) data;
    NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id);

    double percent = 0;
    if (stat.current_drop_packets > 0)
        percent = (((double) stat.current_drop_packets)
                  / (stat.current_packets + stat.current_drop_packets)) * 100;

    SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu",
                 (uint64_t) ntv->stream_id, stat.current_packets,
                  stat.current_drop_packets, percent, stat.current_bytes);

    SC_ATOMIC_ADD(total_packets, stat.current_packets);
    SC_ATOMIC_ADD(total_drops, stat.current_drop_packets);
    SC_ATOMIC_ADD(total_tallied, 1);

    if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) {
        if (SC_ATOMIC_GET(total_drops) > 0)
            percent = (((double) SC_ATOMIC_GET(total_drops)) / (SC_ATOMIC_GET(total_packets)
                         + SC_ATOMIC_GET(total_drops))) * 100;

        SCLogInfo(" ");
        SCLogInfo("--- Total Packets: %ld  Total Dropped: %ld (%5.2f%%)",
                SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent);

#ifdef NAPATECH_ENABLE_BYPASS
        SCLogInfo("--- BypassCB - Total: %ld,  UDP: %ld,  TCP: %ld,  Unhandled: %ld",
                SC_ATOMIC_GET(flow_callback_cnt),
                SC_ATOMIC_GET(flow_callback_udp_pkts),
                SC_ATOMIC_GET(flow_callback_tcp_pkts),
                SC_ATOMIC_GET(flow_callback_unhandled_pkts));
#endif
    }
}

/**
 * \brief   Deinitializes the NAPATECH card.
 * \param   tv pointer to ThreadVars
 * \param   data pointer that gets cast into NapatechThreadVars for ntv
 */
TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data)
{
    SCEnter();
    NapatechThreadVars *ntv = (NapatechThreadVars *) data;

    SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id);
    NT_NetRxClose(ntv->rx_stream);

    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief   This function passes off to link type decoders.
 *
 * NapatechDecode decodes packets from Napatech and passes
 * them off to the proper link type decoder.
 *
 * \param tv pointer to ThreadVars
 * \param p pointer to the current packet
 * \param data pointer that gets cast into DecodeThreadVars for dtv
 */
TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
{
    SCEnter();

    DecodeThreadVars *dtv = (DecodeThreadVars *) data;

    BUG_ON(PKT_IS_PSEUDOPKT(p));

    // update counters
    DecodeUpdatePacketCounters(tv, dtv, p);

    switch (p->datalink) {
        case LINKTYPE_ETHERNET:
            DecodeEthernet(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p));
            break;
        default:
            SCLogError(SC_ERR_DATALINK_UNIMPLEMENTED,
                    "Datalink type %" PRId32 " not yet supported in module NapatechDecode",
                    p->datalink);
            break;
    }

    PacketDecodeFinalize(tv, dtv, p);
    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief   Initialization of Napatech Thread.
 *
 * \param tv pointer to ThreadVars
 * \param initdata - unused.
 * \param data pointer that gets cast into DecodeThreadVars
 */
TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data)
{
    SCEnter();
    DecodeThreadVars *dtv = NULL;
    dtv = DecodeThreadVarsAlloc(tv);
    if (dtv == NULL) {
        SCReturnInt(TM_ECODE_FAILED);
    }

    DecodeRegisterPerfCounters(dtv, tv);
    *data = (void *) dtv;
    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief   Deinitialization of Napatech Thread.
 *
 * \param tv pointer to ThreadVars
 * \param data pointer that gets cast into DecodeThreadVars
 */
TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data)
{
    if (data != NULL) {
        DecodeThreadVarsFree(tv, data);
    }
    SCReturnInt(TM_ECODE_OK);
}

#endif /* HAVE_NAPATECH */