/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License, version 2.0,
  as published by the Free Software Foundation.

  This program is also distributed with certain software (including
  but not limited to OpenSSL) that is licensed under separate terms,
  as designated in a particular file or component or in included license
  documentation.  The authors of MySQL hereby grant you an additional
  permission to link the program and your derivative works with the
  separately licensed software that they have included with MySQL.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License, version 2.0, for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software Foundation,
  51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */

/*
  This code needs extra visibility in the lexer structures
*/

#include "my_global.h"
#include "my_md5.h"
#include "mysqld_error.h"
#include "sql_data_change.h"

#include "sql_string.h"
#include "sql_class.h"
#include "sql_lex.h"
#include "sql_digest.h"
#include "sql_digest_stream.h"

#include "sql_get_diagnostics.h"

#ifdef NEVER
#include "my_sys.h"
#include "sql_signal.h"
#endif

/* Generated code */
#include "sql_yacc.h"
#define LEX_TOKEN_WITH_DEFINITION
#include "lex_token.h"

/* Name pollution from sql/sql_lex.h */
#ifdef LEX_YYSTYPE
#undef LEX_YYSTYPE
#endif

#define LEX_YYSTYPE YYSTYPE*

#define SIZE_OF_A_TOKEN 2

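/*
  Storage format of the digest token array (see store_token() and
  store_token_identifier() below): each token is written as 2 bytes,
  least significant byte first. An identifier is written as its token,
  followed by a 2-byte string length, followed by the raw string bytes.
*/

/*
  Maximum length, in bytes, considered when computing a digest.
  Configured at server startup.
*/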
ulong max_digest_length= 0;
ulong get_max_digest_length()
{
  return max_digest_length;
}

/**
  Read a single token from token array.
*/
inline uint read_token(const sql_digest_storage *digest_storage,
                       uint index, uint *tok)
{
  uint safe_byte_count= (uint)digest_storage->m_byte_count;

  if (index + SIZE_OF_A_TOKEN <= safe_byte_count &&
      safe_byte_count <= digest_storage->m_token_array_length)
  {
    const unsigned char *src= & digest_storage->m_token_array[index];
    *tok= src[0] | (src[1] << 8);
    return index + SIZE_OF_A_TOKEN;
  }

  /* The input byte stream is exhausted. */
  *tok= 0;
  return MAX_DIGEST_STORAGE_SIZE + 1;
}

/**
  Store a single token in token array.
*/
inline void store_token(sql_digest_storage* digest_storage, uint token)
{
  DBUG_ASSERT(digest_storage->m_byte_count <= digest_storage->m_token_array_length);

  if (digest_storage->m_byte_count + SIZE_OF_A_TOKEN <= digest_storage->m_token_array_length)
  {
    unsigned char* dest= & digest_storage->m_token_array[digest_storage->m_byte_count];
    dest[0]= token & 0xff;
    dest[1]= (token >> 8) & 0xff;
    digest_storage->m_byte_count+= SIZE_OF_A_TOKEN;
  }
  else
  {
    digest_storage->m_full= true;
  }
}

/**
  Read an identifier from token array.
*/
inline uint read_identifier(const sql_digest_storage* digest_storage,
                            uint index, char ** id_string, int *id_length)
{
  uint new_index;
  uint safe_byte_count= (uint)digest_storage->m_byte_count;

  DBUG_ASSERT(index <= safe_byte_count);
  DBUG_ASSERT(safe_byte_count <= digest_storage->m_token_array_length);

  /*
    The token + length + string are written atomically,
    so a length + string is always expected here.
  */

  uint bytes_needed= SIZE_OF_A_TOKEN;
  /* If we can read the token and identifier length */
  if ((index + bytes_needed) <= safe_byte_count)
  {
    const unsigned char *src= & digest_storage->m_token_array[index];
    /* Read the length of the identifier */
    uint length= src[0] | (src[1] << 8);
    bytes_needed+= length;
    /* If we can read the entire identifier from the token array */
    if ((index + bytes_needed) <= safe_byte_count)
    {
      *id_string= (char *) (src + 2);
      *id_length= length;

      new_index= index + bytes_needed;
      DBUG_ASSERT(new_index <= safe_byte_count);
      return new_index;
    }
  }

  /* The input byte stream is exhausted. */
  return MAX_DIGEST_STORAGE_SIZE + 1;
}

/**
  Store an identifier in token array.
*/
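/*
  Bytes written below, in order (multi-byte values little-endian):
    dest[0..1]: the token
    dest[2..3]: the identifier length
    dest[4.. ]: the identifier string, copied verbatim
*/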
inline void store_token_identifier(sql_digest_storage* digest_storage,
                                   uint token,
                                   size_t id_length, const char *id_name)
{
  DBUG_ASSERT(digest_storage->m_byte_count <= digest_storage->m_token_array_length);

  size_t bytes_needed= 2 * SIZE_OF_A_TOKEN + id_length;
  if (digest_storage->m_byte_count + bytes_needed <= (unsigned int)digest_storage->m_token_array_length)
  {
    unsigned char* dest= & digest_storage->m_token_array[digest_storage->m_byte_count];
    /* Write the token */
    dest[0]= token & 0xff;
    dest[1]= (token >> 8) & 0xff;
    /* Write the string length */
    dest[2]= id_length & 0xff;
    dest[3]= (id_length >> 8) & 0xff;
    /* Write the string data */
    if (id_length > 0)
      memcpy((char *)(dest + 4), id_name, id_length);
    digest_storage->m_byte_count+= bytes_needed;
  }
  else
  {
    digest_storage->m_full= true;
  }
}

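/**
  Compute the MD5 hash of the raw token array;
  this hash is used as the statement digest.
*/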
void compute_digest_md5(const sql_digest_storage *digest_storage, unsigned char *md5)
{
  compute_md5_hash((char *) md5,
                   (const char *) digest_storage->m_token_array,
                   (int)digest_storage->m_byte_count);
}

/*
  Iterate over the token array and update digest_text.
*/
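/*
  For example, assuming the standard token definitions generated by
  sql/gen_lex_token.cc (where TOK_GENERIC_VALUE prints as "?"),
  the statement:
    SELECT * FROM t1 WHERE a = 1
  produces the digest text:
    SELECT * FROM `t1` WHERE `a` = ?
*/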
void compute_digest_text(const sql_digest_storage* digest_storage,
                         String *digest_text)
{
  DBUG_ASSERT(digest_storage != NULL);
  uint byte_count= (uint)digest_storage->m_byte_count;
  String *digest_output= digest_text;
  uint tok= 0;
  uint current_byte= 0;
  lex_token_string *tok_data;

  /* Reset existing data */
  digest_output->length(0);

  if (byte_count > digest_storage->m_token_array_length)
  {
    digest_output->append("\0", 1);
    return;
  }

  /* Convert text to utf8 */
  const CHARSET_INFO *from_cs= get_charset(digest_storage->m_charset_number, MYF(0));
  const CHARSET_INFO *to_cs= &my_charset_utf8_bin;

  if (from_cs == NULL)
  {
    /*
      Can happen, as we do dirty reads on digest_storage,
      which can be written to in another thread.
    */
    digest_output->append("\0", 1);
    return;
  }

  char id_buffer[NAME_LEN + 1]= {'\0'};
  char *id_string;
  size_t id_length;
  bool convert_text= !my_charset_same(from_cs, to_cs);

  while (current_byte < byte_count)
  {
    current_byte= read_token(digest_storage, current_byte, &tok);

    if (tok <= 0 || tok >= array_elements(lex_token_array)
        || current_byte > max_digest_length)
      return;

    tok_data= &lex_token_array[tok];

    switch (tok)
    {
    /* All identifiers are printed with their name. */
    case IDENT:
    case IDENT_QUOTED:
    case TOK_IDENT:
      {
        char *id_ptr= NULL;
        int id_len= 0;
        uint err_cs= 0;

        /* Get the next identifier from the storage buffer. */
        current_byte= read_identifier(digest_storage, current_byte,
                                      &id_ptr, &id_len);
        if (current_byte > max_digest_length)
          return;

        if (convert_text)
        {
          /* Verify that the converted text will fit. */
          if (to_cs->mbmaxlen*id_len > NAME_LEN)
          {
            digest_output->append("...", 3);
            break;
          }
          /* Convert the identifier string to the utf8 character set. */
          id_length= my_convert(id_buffer, NAME_LEN, to_cs,
                                id_ptr, id_len, from_cs, &err_cs);
          id_string= id_buffer;
        }
        else
        {
          id_string= id_ptr;
          id_length= id_len;
        }

        if (id_length == 0 || err_cs != 0)
        {
          break;
        }
        /* Copy the converted identifier into the digest string. */
        digest_output->append("`", 1);
        if (id_length > 0)
          digest_output->append(id_string, (uint)id_length);
        digest_output->append("` ", 2);
      }
      break;

    /* Everything else is printed as is. */
    default:
      /*
        Be careful not to overflow the digest_text buffer;
        the optional trailing ' ' needs one extra byte.
      */
      int tok_length= tok_data->m_token_length;

      digest_output->append(tok_data->m_token_string, tok_length);
      if (tok_data->m_append_space)
        digest_output->append(" ", 1);
      break;
    }
  }
}

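/**
  Read the token stored at a given byte offset, without advancing.
*/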
static inline uint peek_token(const sql_digest_storage *digest, uint index)
{
  uint token;
  DBUG_ASSERT(index + SIZE_OF_A_TOKEN <= digest->m_byte_count);
  DBUG_ASSERT(digest->m_byte_count <= digest->m_token_array_length);

  token= ((digest->m_token_array[index + 1])<<8) | digest->m_token_array[index];
  return token;
}

/**
  Read the last two tokens from the token array. If an identifier
  is found, do not look for tokens before it.
*/
static inline void peek_last_two_tokens(const sql_digest_storage* digest_storage,
                                        uint last_id_index, uint *t1, uint *t2)
{
  uint byte_count= (uint)digest_storage->m_byte_count;
  uint peek_index= byte_count;

  if (last_id_index + SIZE_OF_A_TOKEN <= peek_index)
  {
    /* Take the last token. */
    peek_index-= SIZE_OF_A_TOKEN;
    *t1= peek_token(digest_storage, peek_index);

    if (last_id_index + SIZE_OF_A_TOKEN <= peek_index)
    {
      /* Take the 2nd token from the end. */
      peek_index-= SIZE_OF_A_TOKEN;
      *t2= peek_token(digest_storage, peek_index);
    }
    else
    {
      *t2= TOK_UNUSED;
    }
  }
  else
  {
    *t1= TOK_UNUSED;
    *t2= TOK_UNUSED;
  }
}

/**
  Read the last three tokens from the token array. If an identifier
  is found, do not look for tokens before it.
*/
static inline void peek_last_three_tokens(const sql_digest_storage* digest_storage,
                                          uint last_id_index, uint *t1, uint *t2, uint *t3)
{
  uint byte_count= (uint)digest_storage->m_byte_count;
  uint peek_index= byte_count;

  if (last_id_index + SIZE_OF_A_TOKEN <= peek_index)
  {
    /* Take the last token. */
    peek_index-= SIZE_OF_A_TOKEN;
    *t1= peek_token(digest_storage, peek_index);

    if (last_id_index + SIZE_OF_A_TOKEN <= peek_index)
    {
      /* Take the 2nd token from the end. */
      peek_index-= SIZE_OF_A_TOKEN;
      *t2= peek_token(digest_storage, peek_index);

      if (last_id_index + SIZE_OF_A_TOKEN <= peek_index)
      {
        /* Take the 3rd token from the end. */
        peek_index-= SIZE_OF_A_TOKEN;
        *t3= peek_token(digest_storage, peek_index);
      }
      else
      {
        *t3= TOK_UNUSED;
      }
    }
    else
    {
      *t2= TOK_UNUSED;
      *t3= TOK_UNUSED;
    }
  }
  else
  {
    *t1= TOK_UNUSED;
    *t2= TOK_UNUSED;
    *t3= TOK_UNUSED;
  }
}

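/**
  Add a token to the digest under construction. Called by the lexer
  for each token scanned. Literal values, value lists and identifiers
  are reduced on the fly (see the REDUCE rules below), so that
  statements differing only in their literal values share a digest.
*/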
sql_digest_state* digest_add_token(sql_digest_state *state,
                                   uint token,
                                   LEX_YYSTYPE yylval)
{
  sql_digest_storage *digest_storage= NULL;

  digest_storage= &state->m_digest_storage;

  /*
    Stop collecting further tokens if the digest storage is full or
    if an END_OF_INPUT token is received.
  */
  if (digest_storage->m_full || token == END_OF_INPUT)
    return NULL;

  /*
    Peek at the last two tokens collected so far. These tokens are used
    by the reduce rules for normalisation. Make sure not to consider
    identifier tokens in a reduce.
  */
  uint last_token;
  uint last_token2;

  switch (token)
  {
    case NUM:
    case LONG_NUM:
    case ULONGLONG_NUM:
    case DECIMAL_NUM:
    case FLOAT_NUM:
    case BIN_NUM:
    case HEX_NUM:
    {
      bool found_unary;
      do
      {
        found_unary= false;
        peek_last_two_tokens(digest_storage, state->m_last_id_index,
                             &last_token, &last_token2);

        if ((last_token == '-') || (last_token == '+'))
        {
          /*
            We need to differentiate:
            - a <unary minus> operator
            - a <unary plus> operator
            from
            - a <binary minus> operator
            - a <binary plus> operator
            to only reduce "a = -1" to "a = ?", and not change "b - 1" to "b ?"

            Binary operators are found inside an expression,
            while unary operators are found at the beginning of an expression, or after operators.

            To achieve this, every token that is followed by an <expr> expression
            in the SQL grammar is flagged.
            See sql/sql_yacc.yy
            See sql/gen_lex_token.cc

            For example,
            "(-1)" is parsed as "(", "-", NUM, ")", and lex_token_array["("].m_start_expr is true,
            so reduction of the "-" NUM is done, the result is "(?)".
            "(a-1)" is parsed as "(", ID, "-", NUM, ")", and lex_token_array[ID].m_start_expr is false,
            so the operator is binary, no reduction is done, and the result is "(a-?)".
          */
          if (lex_token_array[last_token2].m_start_expr)
          {
            /*
              REDUCE:
              TOK_GENERIC_VALUE := (UNARY_PLUS | UNARY_MINUS) (NUM | LONG_NUM | ... | FLOAT_NUM)

              REDUCE:
              TOK_GENERIC_VALUE := (UNARY_PLUS | UNARY_MINUS) TOK_GENERIC_VALUE
            */
            token= TOK_GENERIC_VALUE;
            digest_storage->m_byte_count-= SIZE_OF_A_TOKEN;
            found_unary= true;
          }
        }
      } while (found_unary);
    }
    /* fall through */
    case LEX_HOSTNAME:
    case TEXT_STRING:
    case NCHAR_STRING:
    case PARAM_MARKER:
    {
      /*
        REDUCE:
        TOK_GENERIC_VALUE := BIN_NUM | DECIMAL_NUM | ... | ULONGLONG_NUM
      */
      token= TOK_GENERIC_VALUE;

      peek_last_two_tokens(digest_storage, state->m_last_id_index,
                           &last_token, &last_token2);

      if ((last_token2 == TOK_GENERIC_VALUE ||
           last_token2 == TOK_GENERIC_VALUE_LIST) &&
          (last_token == ','))
      {
        /*
          REDUCE:
          TOK_GENERIC_VALUE_LIST :=
            TOK_GENERIC_VALUE ',' TOK_GENERIC_VALUE

          REDUCE:
          TOK_GENERIC_VALUE_LIST :=
            TOK_GENERIC_VALUE_LIST ',' TOK_GENERIC_VALUE
        */
        digest_storage->m_byte_count-= 2*SIZE_OF_A_TOKEN;
        token= TOK_GENERIC_VALUE_LIST;
      }
      /*
        Add this token or the resulting reduce to digest storage.
      */
      store_token(digest_storage, token);
      break;
    }
    case ')':
    {
      peek_last_two_tokens(digest_storage, state->m_last_id_index,
                           &last_token, &last_token2);

      if (last_token == TOK_GENERIC_VALUE &&
          last_token2 == '(')
      {
        /*
          REDUCE:
          TOK_ROW_SINGLE_VALUE :=
            '(' TOK_GENERIC_VALUE ')'
        */
        digest_storage->m_byte_count-= 2*SIZE_OF_A_TOKEN;
        token= TOK_ROW_SINGLE_VALUE;

        /* Read last two tokens again */
        peek_last_two_tokens(digest_storage, state->m_last_id_index,
                             &last_token, &last_token2);

        if ((last_token2 == TOK_ROW_SINGLE_VALUE ||
             last_token2 == TOK_ROW_SINGLE_VALUE_LIST) &&
            (last_token == ','))
        {
          /*
            REDUCE:
            TOK_ROW_SINGLE_VALUE_LIST :=
              TOK_ROW_SINGLE_VALUE ',' TOK_ROW_SINGLE_VALUE

            REDUCE:
            TOK_ROW_SINGLE_VALUE_LIST :=
              TOK_ROW_SINGLE_VALUE_LIST ',' TOK_ROW_SINGLE_VALUE
          */
          digest_storage->m_byte_count-= 2*SIZE_OF_A_TOKEN;
          token= TOK_ROW_SINGLE_VALUE_LIST;
        }
      }
      else if (last_token == TOK_GENERIC_VALUE_LIST &&
               last_token2 == '(')
      {
        /*
          REDUCE:
          TOK_ROW_MULTIPLE_VALUE :=
            '(' TOK_GENERIC_VALUE_LIST ')'
        */
        digest_storage->m_byte_count-= 2*SIZE_OF_A_TOKEN;
        token= TOK_ROW_MULTIPLE_VALUE;

        /* Read last two tokens again */
        peek_last_two_tokens(digest_storage, state->m_last_id_index,
                             &last_token, &last_token2);

        if ((last_token2 == TOK_ROW_MULTIPLE_VALUE ||
             last_token2 == TOK_ROW_MULTIPLE_VALUE_LIST) &&
            (last_token == ','))
        {
          /*
            REDUCE:
            TOK_ROW_MULTIPLE_VALUE_LIST :=
              TOK_ROW_MULTIPLE_VALUE ',' TOK_ROW_MULTIPLE_VALUE

            REDUCE:
            TOK_ROW_MULTIPLE_VALUE_LIST :=
              TOK_ROW_MULTIPLE_VALUE_LIST ',' TOK_ROW_MULTIPLE_VALUE
          */
          digest_storage->m_byte_count-= 2*SIZE_OF_A_TOKEN;
          token= TOK_ROW_MULTIPLE_VALUE_LIST;
        }
      }
      /*
        Add this token or the resulting reduce to digest storage.
      */
      store_token(digest_storage, token);
      break;
    }
    case IDENT:
    case IDENT_QUOTED:
    {
      YYSTYPE *lex_token= yylval;
      char *yytext= lex_token->lex_str.str;
      size_t yylen= lex_token->lex_str.length;

      /*
        REDUCE:
          TOK_IDENT := IDENT | IDENT_QUOTED
        The parser gives IDENT or IDENT_QUOTED for the same text,
        depending on the character set used.
        We unify both to always print the same digest text,
        and always have the same digest hash.
      */
      token= TOK_IDENT;
      /* Add this token and identifier string to digest storage. */
      store_token_identifier(digest_storage, token, yylen, yytext);

      /* Update the index of the last identifier found. */
      state->m_last_id_index= (int)digest_storage->m_byte_count;
      break;
    }
    case 0:
    {
      if (digest_storage->m_byte_count < SIZE_OF_A_TOKEN)
        break;
      /* Discard a trailing ';' token, if present, at end of input. */
      unsigned int temp_tok;
      read_token(digest_storage,
                 digest_storage->m_byte_count-SIZE_OF_A_TOKEN,
                 & temp_tok);
      if (temp_tok == ';')
        digest_storage->m_byte_count-= SIZE_OF_A_TOKEN;
      break;
    }
    default:
    {
      /* Add this token to digest storage. */
      store_token(digest_storage, token);
      break;
    }
  }

  return state;
}

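/**
  Apply the reduce "token_left := token_right" to the digest.
  The only caller is the parser (see sql/sql_yacc.yy, rule
  literal := NULL_SYM), which uses it for
  TOK_GENERIC_VALUE := NULL_SYM.
*/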
sql_digest_state* digest_reduce_token(sql_digest_state *state,
                                      uint token_left, uint token_right)
{
  sql_digest_storage *digest_storage= NULL;

  digest_storage= &state->m_digest_storage;

  /*
    Stop collecting further tokens if digest storage is full.
  */
  if (digest_storage->m_full)
    return NULL;

  uint last_token;
  uint last_token2;
  uint last_token3;
  uint token_to_push= TOK_UNUSED;

  peek_last_two_tokens(digest_storage, state->m_last_id_index,
                       &last_token, &last_token2);

  /*
    There is only one caller of digest_reduce_token(),
    see sql/sql_yacc.yy, rule literal := NULL_SYM.
    REDUCE:
      token_left := token_right
    Used for:
      TOK_GENERIC_VALUE := NULL_SYM
  */

  if (last_token == token_right)
  {
    /*
      Current stream is like:
        TOKEN_X TOKEN_RIGHT .
      REDUCE to
        TOKEN_X TOKEN_LEFT .
    */
    digest_storage->m_byte_count-= SIZE_OF_A_TOKEN;
    store_token(digest_storage, token_left);
  }
  else
  {
    /*
      Current stream is like:
        TOKEN_X TOKEN_RIGHT TOKEN_Y .
      Pop TOKEN_Y
        TOKEN_X TOKEN_RIGHT . TOKEN_Y
      REDUCE to
        TOKEN_X TOKEN_LEFT . TOKEN_Y
    */
    DBUG_ASSERT(last_token2 == token_right);
    digest_storage->m_byte_count-= 2 * SIZE_OF_A_TOKEN;
    store_token(digest_storage, token_left);
    token_to_push= last_token;
  }

  peek_last_three_tokens(digest_storage, state->m_last_id_index,
                         &last_token, &last_token2, &last_token3);

  if ((last_token3 == TOK_GENERIC_VALUE ||
       last_token3 == TOK_GENERIC_VALUE_LIST) &&
      (last_token2 == ',') &&
      (last_token == TOK_GENERIC_VALUE))
  {
    /*
      REDUCE:
      TOK_GENERIC_VALUE_LIST :=
        TOK_GENERIC_VALUE ',' TOK_GENERIC_VALUE

      REDUCE:
      TOK_GENERIC_VALUE_LIST :=
        TOK_GENERIC_VALUE_LIST ',' TOK_GENERIC_VALUE
    */
    digest_storage->m_byte_count-= 3*SIZE_OF_A_TOKEN;
    store_token(digest_storage, TOK_GENERIC_VALUE_LIST);
  }

  if (token_to_push != TOK_UNUSED)
  {
    /*
      Push TOKEN_Y
    */
    store_token(digest_storage, token_to_push);
  }

  return state;
}