1 /*
2 * Copyright (c) 2015-2020, Intel Corporation
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * * Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Intel Corporation nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** \file
30 \brief Dispatches NFA engine API calls to the appropriate engines
31 */
32 #include "nfa_api.h"
33
34 #include "nfa_api_queue.h"
35 #include "nfa_internal.h"
36 #include "ue2common.h"
37
38 // Engine implementations.
39 #include "castle.h"
40 #include "gough.h"
41 #include "lbr.h"
42 #include "limex.h"
43 #include "mcclellan.h"
44 #include "mcsheng.h"
45 #include "mpv.h"
46 #include "sheng.h"
47 #include "tamarama.h"
48
/**
 * Expands to one case of an engine-dispatch switch: for NFA type enum value
 * \a dc_ltype, invoke nfaExec&lt;dc_ftype&gt;&lt;dc_func_call&gt; and return its result.
 * The trailing break is unreachable (the return exits first) but keeps the
 * case label well-formed if the return is ever removed.
 */
#define DISPATCH_CASE(dc_ltype, dc_ftype, dc_func_call) \
    case dc_ltype: \
        return nfaExec##dc_ftype##dc_func_call; \
    break
53
54 // general framework calls
55
/**
 * Switches on nfa->type and forwards \a dbnt_func (the suffix and argument
 * list of an engine API call) to the matching engine implementation,
 * returning its result via DISPATCH_CASE. An unknown engine type asserts in
 * debug builds and falls through to the caller's fallback return statement.
 */
#define DISPATCH_BY_NFA_TYPE(dbnt_func)                                 \
    switch (nfa->type) {                                                \
        DISPATCH_CASE(LIMEX_NFA_32, LimEx32, dbnt_func);                \
        DISPATCH_CASE(LIMEX_NFA_64, LimEx64, dbnt_func);                \
        DISPATCH_CASE(LIMEX_NFA_128, LimEx128, dbnt_func);              \
        DISPATCH_CASE(LIMEX_NFA_256, LimEx256, dbnt_func);              \
        DISPATCH_CASE(LIMEX_NFA_384, LimEx384, dbnt_func);              \
        DISPATCH_CASE(LIMEX_NFA_512, LimEx512, dbnt_func);              \
        DISPATCH_CASE(MCCLELLAN_NFA_8, McClellan8, dbnt_func);          \
        DISPATCH_CASE(MCCLELLAN_NFA_16, McClellan16, dbnt_func);        \
        DISPATCH_CASE(GOUGH_NFA_8, Gough8, dbnt_func);                  \
        DISPATCH_CASE(GOUGH_NFA_16, Gough16, dbnt_func);                \
        DISPATCH_CASE(MPV_NFA, Mpv, dbnt_func);                         \
        DISPATCH_CASE(LBR_NFA_DOT, LbrDot, dbnt_func);                  \
        DISPATCH_CASE(LBR_NFA_VERM, LbrVerm, dbnt_func);                \
        DISPATCH_CASE(LBR_NFA_NVERM, LbrNVerm, dbnt_func);              \
        DISPATCH_CASE(LBR_NFA_SHUF, LbrShuf, dbnt_func);                \
        DISPATCH_CASE(LBR_NFA_TRUF, LbrTruf, dbnt_func);                \
        DISPATCH_CASE(CASTLE_NFA, Castle, dbnt_func);                   \
        DISPATCH_CASE(SHENG_NFA, Sheng, dbnt_func);                     \
        DISPATCH_CASE(TAMARAMA_NFA, Tamarama, dbnt_func);               \
        DISPATCH_CASE(MCSHENG_NFA_8, McSheng8, dbnt_func);              \
        DISPATCH_CASE(MCSHENG_NFA_16, McSheng16, dbnt_func);            \
        DISPATCH_CASE(SHENG_NFA_32, Sheng32, dbnt_func);                \
        DISPATCH_CASE(SHENG_NFA_64, Sheng64, dbnt_func);                \
        DISPATCH_CASE(MCSHENG_64_NFA_8, McSheng64_8, dbnt_func);        \
        DISPATCH_CASE(MCSHENG_64_NFA_16, McSheng64_16, dbnt_func);      \
    default:                                                            \
        assert(0);                                                      \
    }
86
/**
 * Dispatches the engine-specific _testEOD call, which checks for matches at
 * end-of-data at stream offset \a offset, reporting them via \a callback.
 * Returns the engine's result; the fall-through 0 is only reached for an
 * unknown engine type (which asserts in debug builds).
 */
char nfaCheckFinalState(const struct NFA *nfa, const char *state,
                        const char *streamState, u64a offset,
                        NfaCallback callback, void *context) {
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    // Caller should avoid calling us if we can never produce matches.
    assert(nfaAcceptsEod(nfa));

    DISPATCH_BY_NFA_TYPE(_testEOD(nfa, state, streamState, offset, callback,
                                  context));
    return 0;
}
99
/**
 * Dispatches the engine-specific _queueInitState call to initialise the
 * full state stored in queue \a q. Returns the engine's result; 0 is the
 * unknown-engine fall-through.
 */
char nfaQueueInitState(const struct NFA *nfa, struct mq *q) {
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_queueInitState(nfa, q));
    return 0;
}
106
/**
 * Dispatches the engine-specific _Q call: run the engine over the events on
 * queue \a q up to location \a end. Returns the engine's result; 0 is the
 * unknown-engine fall-through.
 */
static really_inline
char nfaQueueExec_i(const struct NFA *nfa, struct mq *q, s64a end) {
    DISPATCH_BY_NFA_TYPE(_Q(nfa, q, end));
    return 0;
}
112
/**
 * Dispatches the engine-specific _Q2 call (the "stop at match" variant used
 * by nfaQueueExecToMatch). Returns the engine's result; 0 is the
 * unknown-engine fall-through.
 */
static really_inline
char nfaQueueExec2_i(const struct NFA *nfa, struct mq *q, s64a end) {
    DISPATCH_BY_NFA_TYPE(_Q2(nfa, q, end));
    return 0;
}
118
/**
 * Public "raw" entry point: runs the engine over queue \a q up to \a end
 * with none of the trimming or limit checks performed by nfaQueueExec().
 */
char nfaQueueExec_raw(const struct NFA *nfa, struct mq *q, s64a end) {
    char rv = nfaQueueExec_i(nfa, q, end);
    return rv;
}
122
/**
 * Public "raw" entry point for the stop-at-match variant: runs the engine
 * over queue \a q up to \a end with none of the trimming or limit checks
 * performed by nfaQueueExecToMatch().
 */
char nfaQueueExec2_raw(const struct NFA *nfa, struct mq *q, s64a end) {
    char rv = nfaQueueExec2_i(nfa, q, end);
    return rv;
}
126
/**
 * Dispatches the engine-specific _QR call, consuming the whole queue and
 * checking for report \a report. Returns the engine's result; 0 is the
 * unknown-engine fall-through.
 */
static really_inline
char nfaQueueExecRose_i(const struct NFA *nfa, struct mq *q, ReportID report) {
    DISPATCH_BY_NFA_TYPE(_QR(nfa, q, report));
    return 0;
}
132
133 /** Returns 0 if this NFA cannot possibly match (due to width constraints etc)
134 * and the caller should return 0. May also edit the queue. */
135 static really_inline
nfaQueueCanMatch(const struct NFA * nfa,struct mq * q,s64a end,char * q_trimmed)136 char nfaQueueCanMatch(const struct NFA *nfa, struct mq *q, s64a end,
137 char *q_trimmed) {
138 assert(q_trimmed);
139 assert(q->end - q->cur >= 2);
140 assert(end >= 0);
141
142 DEBUG_PRINTF("q->offset=%llu, end=%lld\n", q->offset, end);
143 DEBUG_PRINTF("maxBiAnchoredWidth=%u, maxOffset=%u\n",
144 nfa->maxBiAnchoredWidth, nfa->maxOffset);
145
146 if (nfa->maxBiAnchoredWidth &&
147 (end + q->offset > nfa->maxBiAnchoredWidth)) {
148 DEBUG_PRINTF("stream too long: o %llu l %zu max: %hhu\n", q->offset,
149 q->length, nfa->maxBiAnchoredWidth);
150 return 0;
151 }
152
153 if (nfa->maxOffset) {
154 if (q->offset >= nfa->maxOffset) {
155 DEBUG_PRINTF("stream is past maxOffset\n");
156 return 0;
157 }
158
159 if (q->offset + end > nfa->maxOffset) {
160 s64a maxEnd = nfa->maxOffset - q->offset;
161 DEBUG_PRINTF("me %lld off %llu len = %lld\n", maxEnd,
162 q->offset, end);
163 while (q->end > q->cur
164 && q->items[q->end - 1].location > maxEnd) {
165 *q_trimmed = 1;
166 DEBUG_PRINTF("killing item %u %lld %u\n", q->end,
167 q->items[q->end - 1].location,
168 q->items[q->end - 1].type);
169 q->items[q->end - 1].location = maxEnd;
170 q->items[q->end - 1].type = MQE_END;
171 if (q->end - q->cur < 2
172 ||q->items[q->end - 2].location <= maxEnd) {
173 break;
174 }
175 q->end--;
176 }
177
178 if (q->end - q->cur < 2) { /* nothing left on q */
179 DEBUG_PRINTF("queue empty\n");
180 return 0;
181 }
182 }
183
184 #ifdef DEBUG
185 if (*q_trimmed) {
186 debugQueue(q);
187 }
188 #endif
189 }
190
191 return 1;
192 }
193
/**
 * Runs engine \a nfa over the events on queue \a q up to location \a end.
 * Trims \a end back to the queue's buffer length if it overshoots, checks
 * the engine's width/offset limits via nfaQueueCanMatch() (which may trim
 * the queue), and flushes pending current-location reports when the engine
 * can no longer match. Returns the engine result masked to 0 whenever any
 * trimming occurred.
 */
char nfaQueueExec(const struct NFA *nfa, struct mq *q, s64a end) {
    DEBUG_PRINTF("nfa=%p end=%lld\n", nfa, end);
#ifdef DEBUG
    debugQueue(q);
#endif

    assert(q && q->context && q->state);
    assert(end >= 0);
    assert(q->cur < q->end);
    assert(q->end <= MAX_MQE_LEN);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));
    assert(end < q->items[q->end - 1].location
           || q->items[q->end - 1].type == MQE_END);

    // Nothing on the queue at or before end: trivially still alive.
    if (q->items[q->cur].location > end) {
        return 1;
    }

    char q_trimmed = 0;

    assert(end <= (s64a)q->length || !q->hlength);
    /* due to reverse accel in block mode some queues may work on a truncated
     * buffer */
    if (end > (s64a)q->length) {
        end = q->length;
        q_trimmed = 1;
    }

    if (!nfaQueueCanMatch(nfa, q, end, &q_trimmed)) {
        // Engine can never match again; flush any deferred reports first.
        if (q->report_current) {
            nfaReportCurrentMatches(nfa, q);
            q->report_current = 0;
        }

        return 0;
    }

    char rv = nfaQueueExec_i(nfa, q, end);

#ifdef DEBUG
    debugQueue(q);
#endif

    assert(!q->report_current);
    DEBUG_PRINTF("returned rv=%d, q_trimmed=%d\n", rv, q_trimmed);
    return rv && !q_trimmed;
}
241
/**
 * Like nfaQueueExec(), but uses the engine's _Q2 entry point, which can stop
 * early and return MO_MATCHES_PENDING. When that happens after the queue was
 * trimmed, the final queue item's location is pushed past nfa->maxOffset so
 * that the resumed call re-trims consistently. Otherwise returns the engine
 * result masked to 0 whenever any trimming occurred.
 */
char nfaQueueExecToMatch(const struct NFA *nfa, struct mq *q, s64a end) {
    DEBUG_PRINTF("nfa=%p end=%lld\n", nfa, end);
#ifdef DEBUG
    debugQueue(q);
#endif

    assert(q);
    assert(end >= 0);
    assert(q->state);
    assert(q->cur < q->end);
    assert(q->end <= MAX_MQE_LEN);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));
    assert(end < q->items[q->end - 1].location
           || q->items[q->end - 1].type == MQE_END);

    // Tracks trimming caused by the truncated buffer, separately from the
    // maxOffset trimming tracked below.
    char q_trimmed_ra = 0;
    assert(end <= (s64a)q->length || !q->hlength);
    /* due to reverse accel in block mode some queues may work on a truncated
     * buffer */
    if (q->items[q->cur].location > end) {
        return 1;
    }

    if (end > (s64a)q->length) {
        end = q->length;
        q_trimmed_ra = 1;
    }

    char q_trimmed = 0;
    if (!nfaQueueCanMatch(nfa, q, end, &q_trimmed)) {
        // Engine can never match again; flush any deferred reports first.
        if (q->report_current) {
            nfaReportCurrentMatches(nfa, q);
            q->report_current = 0;
        }

        return 0;
    }

    char rv = nfaQueueExec2_i(nfa, q, end);
    assert(!q->report_current);
    DEBUG_PRINTF("returned rv=%d, q_trimmed=%d\n", rv, q_trimmed);
    if (rv == MO_MATCHES_PENDING) {
        if (q_trimmed) {
            // We need to "fix" the queue so that subsequent operations must
            // trim it as well.
            assert(q->end > 0);
            assert(nfa->maxOffset);
            q->items[q->end - 1].location = nfa->maxOffset + 1;
        }
        return rv;
    }
    return rv && !q_trimmed && !q_trimmed_ra;
}
295
/**
 * Dispatches the engine-specific _reportCurrent call to fire reports for
 * matches at the queue's current location. Returns the engine's result; 0 is
 * the unknown-engine fall-through.
 */
char nfaReportCurrentMatches(const struct NFA *nfa, struct mq *q) {
    DISPATCH_BY_NFA_TYPE(_reportCurrent(nfa, q));
    return 0;
}
300
/**
 * Dispatches the engine-specific _inAccept call: is the engine in a state
 * that would fire report \a report? Returns the engine's result; 0 is the
 * unknown-engine fall-through.
 */
char nfaInAcceptState(const struct NFA *nfa, ReportID report, struct mq *q) {
    DISPATCH_BY_NFA_TYPE(_inAccept(nfa, report, q));
    return 0;
}
305
/**
 * Dispatches the engine-specific _inAnyAccept call: is the engine in any
 * accept state at all? Returns the engine's result; 0 is the unknown-engine
 * fall-through.
 */
char nfaInAnyAcceptState(const struct NFA *nfa, struct mq *q) {
    DISPATCH_BY_NFA_TYPE(_inAnyAccept(nfa, q));
    return 0;
}
310
/**
 * Runs engine \a nfa over the whole queue \a q, checking for report \a r
 * (Rose suffix/outfix usage). Unlike nfaQueueExec(), the queue must have no
 * context and no pending current-location reports, and no trimming is
 * performed here.
 */
char nfaQueueExecRose(const struct NFA *nfa, struct mq *q, ReportID r) {
    DEBUG_PRINTF("nfa=%p\n", nfa);
#ifdef DEBUG
    debugQueue(q);
#endif

    assert(q && !q->context && q->state);
    assert(q->cur <= q->end);
    assert(q->end <= MAX_MQE_LEN);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));
    assert(!q->report_current);

    return nfaQueueExecRose_i(nfa, q, r);
}
325
/**
 * Dispatches the engine-specific _B_Reverse call: block-mode reverse scan of
 * \a buf (length \a buflen) with history \a hbuf (length \a hlen), reporting
 * via \a callback. Returns the engine's result; 0 is the unknown-engine
 * fall-through.
 */
char nfaBlockExecReverse(const struct NFA *nfa, u64a offset, const u8 *buf,
                         size_t buflen, const u8 *hbuf, size_t hlen,
                         NfaCallback callback, void *context) {
    assert(nfa);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_B_Reverse(nfa, offset, buf, buflen, hbuf, hlen,
                                    callback, context));
    return 0;
}
336
/**
 * Dispatches the engine-specific _queueCompressState call: compress the full
 * state on queue \a q into stream state, as of location \a loc. Returns the
 * engine's result; 0 is the unknown-engine fall-through.
 */
char nfaQueueCompressState(const struct NFA *nfa, const struct mq *q,
                           s64a loc) {
    assert(nfa && q);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_queueCompressState(nfa, q, loc));
    return 0;
}
345
/**
 * Dispatches the engine-specific _expandState call: expand compressed stream
 * state \a src into full state \a dest at stream offset \a offset. Returns
 * the engine's result; 0 is the unknown-engine fall-through.
 */
char nfaExpandState(const struct NFA *nfa, void *dest, const void *src,
                    u64a offset, u8 key) {
    assert(nfa && dest && src);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_expandState(nfa, dest, src, offset, key));
    return 0;
}
354
/**
 * Dispatches the engine-specific _initCompressedState call: write an initial
 * compressed stream state into \a state as of stream offset \a offset.
 * Returns the engine's result; 0 is the unknown-engine fall-through.
 */
char nfaInitCompressedState(const struct NFA *nfa, u64a offset, void *state,
                            u8 key) {
    assert(nfa && state);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_initCompressedState(nfa, offset, state, key));
    return 0;
}
363
/**
 * Dispatches the engine-specific _zombie_status call for queue \a q at
 * location \a loc. Returns the engine's status; NFA_ZOMBIE_NO is the
 * unknown-engine fall-through.
 */
enum nfa_zombie_status nfaGetZombieStatus(const struct NFA *nfa, struct mq *q,
                                          s64a loc) {
    DISPATCH_BY_NFA_TYPE(_zombie_status(nfa, q, loc));
    return NFA_ZOMBIE_NO;
}
369