/*
 * Copyright (c) 2015-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/** \file
 * \brief Scratch and associated data structures.
 *
 * This header gets pulled into many places (many deep, slow-to-compile
 * places). Try to keep the included headers under control.
 */

#ifndef SCRATCH_H_DA6D4FC06FF410
#define SCRATCH_H_DA6D4FC06FF410

#include "hs_common.h"
#include "ue2common.h"
#include "rose/rose_types.h"

#ifdef __cplusplus
extern "C"
{
#endif

UNUSED static const u32 SCRATCH_MAGIC = 0x544F4259;

struct fatbit;
struct hs_scratch;
struct RoseEngine;
struct mq;

struct queue_match {
    /** \brief Used to store the current location of a (suf|out)fix match in
     * the current buffer.
     *
     * As (suf|out)fixes always run in the main buffer and never in history,
     * this number will always be positive (matches at 0 belong to the
     * previous write). Hence we can get away with a size_t rather than the
     * usual s64a for a location. */
    size_t loc;

    u32 queue; /**< queue index. */
};
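
/*
 * Illustrative sketch (not part of the engine): queue_match::loc is relative
 * to the current scan buffer, so an absolute stream offset for such a match
 * can be recovered by adding the buffer's base offset from core_info, e.g.
 *
 *     u64a absolute_loc = scratch->core_info.buf_offset + qm->loc;
 *
 * where `scratch` and `qm` are assumed to be pointers to the enclosing
 * hs_scratch and to a queue_match entry respectively.
 */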

struct catchup_pq {
    struct queue_match *qm;
    u32 qm_size; /**< current size of the priority queue */
};

/** \brief Status flag: user requested termination. */
#define STATUS_TERMINATED   (1U << 0)

/** \brief Status flag: it has been determined that it is not possible for this
 * stream to raise any more matches.
 *
 * This may be because all its exhaustion keys are on, or for other reasons
 * (e.g. anchored sections not matching). */
#define STATUS_EXHAUSTED    (1U << 1)

/** \brief Status flag: Rose requires a rebuild, as a delayed literal matched
 * in history. */
#define STATUS_DELAY_DIRTY  (1U << 2)

/** \brief Status flag: Unexpected Rose program error. */
#define STATUS_ERROR        (1U << 3)

/** \brief Core information about the current scan, used everywhere. */
struct core_info {
    void *userContext; /**< user-supplied context */

    /** \brief user-supplied match callback */
    int (HS_CDECL *userCallback)(unsigned int id, unsigned long long from,
                                 unsigned long long to, unsigned int flags,
                                 void *ctx);

    const struct RoseEngine *rose;
    char *state; /**< full stream state */
    char *exhaustionVector; /**< pointer to evec for this stream */
    char *logicalVector; /**< pointer to lvec for this stream */
    char *combVector; /**< pointer to cvec for this stream */
    const u8 *buf; /**< main scan buffer */
    size_t len; /**< length of main scan buffer in bytes */
    const u8 *hbuf; /**< history buffer */
    size_t hlen; /**< length of history buffer in bytes */
    u64a buf_offset; /**< stream offset of the base of the buffer */
    u8 status; /**< stream status bitmask, using the STATUS_ flags above */
};
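
/*
 * Illustrative sketch (not part of this header): a user callback with the
 * signature expected by core_info::userCallback. The name and behaviour here
 * are hypothetical; a real handler would do application-specific work with
 * the match and return non-zero to request termination of the scan.
 *
 *     static int HS_CDECL example_on_match(unsigned int id,
 *                                          unsigned long long from,
 *                                          unsigned long long to,
 *                                          unsigned int flags,
 *                                          void *ctx) {
 *         unsigned long long *last_end = ctx; // user-supplied context
 *         *last_end = to;                     // remember latest match end
 *         (void)id; (void)from; (void)flags;
 *         return 0;                           // 0: continue scanning
 *     }
 */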

/** \brief Rose state information. */
struct RoseContext {
    u8 mpv_inactive;
    u64a groups;
    u64a lit_offset_adjust; /**< offset to add to matches coming from hwlm */
    u64a delayLastEndOffset; /**< end of the last match from FDR used by delay
                              * code */
    u64a lastEndOffset; /**< end of the last match from FDR/anchored DFAs used
                         * by history code. anchored DFA matches update this
                         * when they are inserted into the literal match
                         * stream */
    u64a lastMatchOffset; /**< last match offset reported up out of Rose;
                           * used _only_ for debugging and asserts */
    u64a lastCombMatchOffset; /**< last match offset of active combinations */
    u64a minMatchOffset; /**< the earliest offset that we are still allowed to
                          * report */
    u64a minNonMpvMatchOffset; /**< the earliest offset that non-mpv engines are
                                * still allowed to report */
    u64a next_mpv_offset; /**< earliest offset at which the MPV can next report
                           * a match; cleared if top events arrive */
    u32 filledDelayedSlots;
    u32 curr_qi;    /**< currently executing main queue index during
                     * \ref nfaQueueExec */

    /**
     * \brief Buffer for caseful long literal support, used in streaming mode
     * only.
     *
     * If a long literal prefix was at the end of the buffer at the end of a
     * stream write, then the long lit table hashes it and stores the result in
     * stream state. At the start of the next write, this value is used to set
     * this buffer to the matching prefix string (stored in the bytecode).
     */
    const u8 *ll_buf;

    /** \brief Length in bytes of the string pointed to by ll_buf. */
    size_t ll_len;

    /** \brief Caseless version of ll_buf. */
    const u8 *ll_buf_nocase;

    /** \brief Length in bytes of the string pointed to by ll_buf_nocase. */
    size_t ll_len_nocase;
};

struct match_deduper {
    struct fatbit *log[2]; /**< even, odd logs */
    struct fatbit *som_log[2]; /**< even, odd fatbit logs for som */
    u64a *som_start_log[2]; /**< even, odd start offset logs for som */
    u32 dkey_count;
    u32 log_size;
    u64a current_report_offset;
    u8 som_log_dirty;
};

/** \brief Hyperscan scratch region header.
 *
 * NOTE: there is no requirement that scratch is 16-byte aligned, as it is
 * allocated by a malloc equivalent, possibly supplied by the user.
 */
struct ALIGN_CL_DIRECTIVE hs_scratch {
    u32 magic;
    u8 in_use; /**< non-zero when being used by an API call. */
    u32 queueCount;
    u32 activeQueueArraySize; /**< size of active queue array fatbit in bytes */
    u32 bStateSize; /**< size of block mode states */
    u32 tStateSize; /**< size of transient rose states */
    u32 fullStateSize; /**< size of uncompressed nfa state */
    struct RoseContext tctxt;
    char *bstate; /**< block mode states */
    char *tstate; /**< state for transient roses */
    char *fullState; /**< uncompressed NFA state */
    struct mq *queues;
    struct fatbit *aqa; /**< active queue array; fatbit of queues that are valid
                         * & active */
    struct fatbit **delay_slots;
    struct fatbit **al_log;
    u64a al_log_sum;
    struct catchup_pq catchup_pq;
    struct core_info core_info;
    struct match_deduper deduper;
    u32 anchored_literal_region_len;
    u32 anchored_literal_fatbit_size; /**< size of each anch fatbit in bytes */
    struct fatbit *handled_roles; /**< fatbit of ROLES (not states) already
                                   * handled by this literal */
    u64a *som_store; /**< array of som locations */
    u64a *som_attempted_store; /**< array of som locations for fail stores */
    struct fatbit *som_set_now; /**< fatbit, true if the som location was set
                                 * based on a match at the current offset */
    struct fatbit *som_attempted_set; /**< fatbit, true if the som location
                            * would have been set at the current offset if the
                            * location had been writable */
    u64a som_set_now_offset; /**< offset that som_set_now refers to */
    u32 som_store_count;
    u32 som_fatbit_size; /**< size of som location fatbit structures in bytes */
    u32 handledKeyFatbitSize; /**< size of handled_keys fatbit in bytes */
    u32 delay_fatbit_size; /**< size of each delay fatbit in bytes */
    u32 scratchSize;
    char *scratch_alloc; /* user-allocated scratch object */
    u64a *fdr_conf; /**< FDR confirm value */
    u8 fdr_conf_offset; /**< offset where FDR/Teddy front end matches
                         * in buffer */
};

/* array of fatbit ptr; TODO: why not an array of fatbits? */
static really_inline
struct fatbit **getAnchoredLiteralLog(struct hs_scratch *scratch) {
    return scratch->al_log;
}

static really_inline
struct fatbit **getDelaySlots(struct hs_scratch *scratch) {
    return scratch->delay_slots;
}

static really_inline
char told_to_stop_matching(const struct hs_scratch *scratch) {
    return scratch->core_info.status & STATUS_TERMINATED;
}

static really_inline
char can_stop_matching(const struct hs_scratch *scratch) {
    return scratch->core_info.status &
           (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_ERROR);
}

static really_inline
char internal_matching_error(const struct hs_scratch *scratch) {
    return scratch->core_info.status & STATUS_ERROR;
}

/**
 * \brief Mark scratch as in use.
 *
 * Returns non-zero if it was already in use, zero otherwise.
 */
static really_inline
char markScratchInUse(struct hs_scratch *scratch) {
    DEBUG_PRINTF("marking scratch as in use\n");
    assert(scratch && scratch->magic == SCRATCH_MAGIC);
    if (scratch->in_use) {
        DEBUG_PRINTF("scratch already in use!\n");
        return 1;
    }
    scratch->in_use = 1;
    return 0;
}

/**
 * \brief Mark scratch as no longer in use.
 */
static really_inline
void unmarkScratchInUse(struct hs_scratch *scratch) {
    DEBUG_PRINTF("marking scratch as not in use\n");
    assert(scratch && scratch->magic == SCRATCH_MAGIC);
    assert(scratch->in_use == 1);
    scratch->in_use = 0;
}
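
/*
 * Illustrative sketch (hypothetical caller, not a real entry point): API
 * entry points typically bracket their work with the pair above, bailing
 * out with HS_SCRATCH_IN_USE if the scratch is already owned by another
 * call on the same scratch region.
 *
 *     if (unlikely(markScratchInUse(scratch))) {
 *         return HS_SCRATCH_IN_USE;
 *     }
 *     // ... perform the scan using this scratch ...
 *     unmarkScratchInUse(scratch);
 *     return HS_SUCCESS;
 */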

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* SCRATCH_H_DA6D4FC06FF410 */