1 use rustc_ast::ast::AttrStyle;
2 use rustc_ast::token::{self, CommentKind, Token, TokenKind};
3 use rustc_ast::tokenstream::{Spacing, TokenStream};
4 use rustc_errors::{error_code, Applicability, DiagnosticBuilder, FatalError, PResult};
5 use rustc_lexer::unescape::{self, Mode};
6 use rustc_lexer::{Base, DocStyle, RawStrError};
7 use rustc_session::parse::ParseSess;
8 use rustc_span::symbol::{sym, Symbol};
9 use rustc_span::{BytePos, Pos, Span};
10 
11 use tracing::debug;
12 
13 mod tokentrees;
14 mod unescape_error_reporting;
15 mod unicode_chars;
16 
17 use unescape_error_reporting::{emit_unescape_error, escaped_char};
18 
/// Record of a delimiter mismatch found while building token trees.
///
/// NOTE(review): these fields are consumed by the `tokentrees` module (not
/// visible in this chunk); the per-field meanings below are inferred from
/// the names — confirm against `tokentrees` before relying on them.
#[derive(Clone, Debug)]
pub struct UnmatchedBrace {
    /// The closing delimiter that was expected.
    pub expected_delim: token::DelimToken,
    /// The closing delimiter actually found, if any.
    pub found_delim: Option<token::DelimToken>,
    /// Where the mismatch was detected.
    pub found_span: Span,
    /// Span of the opening delimiter left unclosed, if known.
    pub unclosed_span: Option<Span>,
    /// A span that may be the intended match, for diagnostics.
    pub candidate_span: Option<Span>,
}
27 
parse_token_trees<'a>( sess: &'a ParseSess, src: &'a str, start_pos: BytePos, override_span: Option<Span>, ) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>)28 crate fn parse_token_trees<'a>(
29     sess: &'a ParseSess,
30     src: &'a str,
31     start_pos: BytePos,
32     override_span: Option<Span>,
33 ) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>) {
34     StringReader { sess, start_pos, pos: start_pos, end_src_index: src.len(), src, override_span }
35         .into_token_trees()
36 }
37 
struct StringReader<'a> {
    /// Parse session, used for diagnostics and interner side tables.
    sess: &'a ParseSess,
    /// Initial position, read-only.
    start_pos: BytePos,
    /// The absolute offset within the source_map of the current character.
    pos: BytePos,
    /// Stop reading src at this index.
    end_src_index: usize,
    /// Source text to tokenize.
    src: &'a str,
    /// When set, `mk_sp` returns this span instead of real offsets.
    override_span: Option<Span>,
}
50 
51 impl<'a> StringReader<'a> {
mk_sp(&self, lo: BytePos, hi: BytePos) -> Span52     fn mk_sp(&self, lo: BytePos, hi: BytePos) -> Span {
53         self.override_span.unwrap_or_else(|| Span::with_root_ctxt(lo, hi))
54     }
55 
    /// Returns the next token, and info about preceding whitespace, if any.
    fn next_token(&mut self) -> (Spacing, Token) {
        // Tokens start out `Joint`; any trivia (or a shebang) seen before
        // the real token downgrades the spacing to `Alone`.
        let mut spacing = Spacing::Joint;

        // Skip `#!` at the start of the file
        let start_src_index = self.src_index(self.pos);
        let text: &str = &self.src[start_src_index..self.end_src_index];
        let is_beginning_of_file = self.pos == self.start_pos;
        if is_beginning_of_file {
            if let Some(shebang_len) = rustc_lexer::strip_shebang(text) {
                self.pos = self.pos + BytePos::from_usize(shebang_len);
                spacing = Spacing::Alone;
            }
        }

        // Skip trivial (whitespace & comments) tokens
        loop {
            let start_src_index = self.src_index(self.pos);
            let text: &str = &self.src[start_src_index..self.end_src_index];

            // Ran out of source text: report end-of-file at an empty span.
            if text.is_empty() {
                let span = self.mk_sp(self.pos, self.pos);
                return (spacing, Token::new(token::Eof, span));
            }

            let token = rustc_lexer::first_token(text);

            // Advance past the raw token before cooking it, so `self.pos`
            // marks its end for span construction below.
            let start = self.pos;
            self.pos = self.pos + BytePos::from_usize(token.len);

            debug!("next_token: {:?}({:?})", token.kind, self.str_from(start));

            match self.cook_lexer_token(token.kind, start) {
                Some(kind) => {
                    let span = self.mk_sp(start, self.pos);
                    return (spacing, Token::new(kind, span));
                }
                // `None` means the raw token was trivia: keep scanning, but
                // remember that the next real token is preceded by space.
                None => spacing = Spacing::Alone,
            }
        }
    }
97 
98     /// Report a fatal lexical error with a given span.
fatal_span(&self, sp: Span, m: &str) -> FatalError99     fn fatal_span(&self, sp: Span, m: &str) -> FatalError {
100         self.sess.span_diagnostic.span_fatal(sp, m)
101     }
102 
103     /// Report a lexical error with a given span.
err_span(&self, sp: Span, m: &str)104     fn err_span(&self, sp: Span, m: &str) {
105         self.sess.span_diagnostic.struct_span_err(sp, m).emit();
106     }
107 
108     /// Report a fatal error spanning [`from_pos`, `to_pos`).
fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> FatalError109     fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> FatalError {
110         self.fatal_span(self.mk_sp(from_pos, to_pos), m)
111     }
112 
113     /// Report a lexical error spanning [`from_pos`, `to_pos`).
err_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str)114     fn err_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) {
115         self.err_span(self.mk_sp(from_pos, to_pos), m)
116     }
117 
struct_fatal_span_char( &self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char, ) -> DiagnosticBuilder<'a>118     fn struct_fatal_span_char(
119         &self,
120         from_pos: BytePos,
121         to_pos: BytePos,
122         m: &str,
123         c: char,
124     ) -> DiagnosticBuilder<'a> {
125         self.sess
126             .span_diagnostic
127             .struct_span_fatal(self.mk_sp(from_pos, to_pos), &format!("{}: {}", m, escaped_char(c)))
128     }
129 
    /// Turns simple `rustc_lexer::TokenKind` enum into a rich
    /// `rustc_ast::TokenKind`. This turns strings into interned
    /// symbols and runs additional validation.
    ///
    /// Returns `None` for trivia (whitespace and non-doc comments), which
    /// the caller treats as a token separator and skips.
    fn cook_lexer_token(&self, token: rustc_lexer::TokenKind, start: BytePos) -> Option<TokenKind> {
        Some(match token {
            rustc_lexer::TokenKind::LineComment { doc_style } => {
                // Skip non-doc comments
                let doc_style = doc_style?;

                // Opening delimiter of the length 3 is not included into the symbol.
                let content_start = start + BytePos(3);
                let content = self.str_from(content_start);
                self.cook_doc_comment(content_start, content, CommentKind::Line, doc_style)
            }
            rustc_lexer::TokenKind::BlockComment { doc_style, terminated } => {
                if !terminated {
                    let msg = match doc_style {
                        Some(_) => "unterminated block doc-comment",
                        None => "unterminated block comment",
                    };
                    let last_bpos = self.pos;
                    self.sess
                        .span_diagnostic
                        .struct_span_fatal_with_code(
                            self.mk_sp(start, last_bpos),
                            msg,
                            error_code!(E0758),
                        )
                        .emit();
                    FatalError.raise();
                }

                // Skip non-doc comments
                let doc_style = doc_style?;

                // Opening delimiter of the length 3 and closing delimiter of the length 2
                // are not included into the symbol.
                // (`terminated` is always true here — the `!terminated` case
                // diverged above — so the subtraction is always 2.)
                let content_start = start + BytePos(3);
                let content_end = self.pos - BytePos(if terminated { 2 } else { 0 });
                let content = self.str_from_to(content_start, content_end);
                self.cook_doc_comment(content_start, content, CommentKind::Block, doc_style)
            }
            rustc_lexer::TokenKind::Whitespace => return None,
            rustc_lexer::TokenKind::Ident | rustc_lexer::TokenKind::RawIdent => {
                let is_raw_ident = token == rustc_lexer::TokenKind::RawIdent;
                let mut ident_start = start;
                if is_raw_ident {
                    // Skip the `r#` prefix so it is not part of the symbol.
                    ident_start = ident_start + BytePos(2);
                }
                // Identifiers are NFC-normalized before interning.
                let sym = nfc_normalize(self.str_from(ident_start));
                let span = self.mk_sp(start, self.pos);
                self.sess.symbol_gallery.insert(sym, span);
                if is_raw_ident {
                    if !sym.can_be_raw() {
                        self.err_span(span, &format!("`{}` cannot be a raw identifier", sym));
                    }
                    self.sess.raw_identifier_spans.borrow_mut().push(span);
                }
                token::Ident(sym, is_raw_ident)
            }
            rustc_lexer::TokenKind::Literal { kind, suffix_start } => {
                // `suffix_start` arrives relative to the token; make it absolute.
                let suffix_start = start + BytePos(suffix_start as u32);
                let (kind, symbol) = self.cook_lexer_literal(start, suffix_start, kind);
                let suffix = if suffix_start < self.pos {
                    let string = self.str_from(suffix_start);
                    if string == "_" {
                        // A bare `_` suffix is a future-compat warning, and the
                        // suffix is dropped from the resulting token.
                        self.sess
                            .span_diagnostic
                            .struct_span_warn(
                                self.mk_sp(suffix_start, self.pos),
                                "underscore literal suffix is not allowed",
                            )
                            .warn(
                                "this was previously accepted by the compiler but is \
                                   being phased out; it will become a hard error in \
                                   a future release!",
                            )
                            .note(
                                "see issue #42326 \
                                 <https://github.com/rust-lang/rust/issues/42326> \
                                 for more information",
                            )
                            .emit();
                        None
                    } else {
                        Some(Symbol::intern(string))
                    }
                } else {
                    None
                };
                token::Literal(token::Lit { kind, symbol, suffix })
            }
            rustc_lexer::TokenKind::Lifetime { starts_with_number } => {
                // Include the leading `'` in the real identifier, for macro
                // expansion purposes. See #12512 for the gory details of why
                // this is necessary.
                let lifetime_name = self.str_from(start);
                if starts_with_number {
                    self.err_span_(start, self.pos, "lifetimes cannot start with a number");
                }
                let ident = Symbol::intern(lifetime_name);
                token::Lifetime(ident)
            }
            // Single-character punctuation maps one-to-one.
            rustc_lexer::TokenKind::Semi => token::Semi,
            rustc_lexer::TokenKind::Comma => token::Comma,
            rustc_lexer::TokenKind::Dot => token::Dot,
            rustc_lexer::TokenKind::OpenParen => token::OpenDelim(token::Paren),
            rustc_lexer::TokenKind::CloseParen => token::CloseDelim(token::Paren),
            rustc_lexer::TokenKind::OpenBrace => token::OpenDelim(token::Brace),
            rustc_lexer::TokenKind::CloseBrace => token::CloseDelim(token::Brace),
            rustc_lexer::TokenKind::OpenBracket => token::OpenDelim(token::Bracket),
            rustc_lexer::TokenKind::CloseBracket => token::CloseDelim(token::Bracket),
            rustc_lexer::TokenKind::At => token::At,
            rustc_lexer::TokenKind::Pound => token::Pound,
            rustc_lexer::TokenKind::Tilde => token::Tilde,
            rustc_lexer::TokenKind::Question => token::Question,
            rustc_lexer::TokenKind::Colon => token::Colon,
            rustc_lexer::TokenKind::Dollar => token::Dollar,
            rustc_lexer::TokenKind::Eq => token::Eq,
            rustc_lexer::TokenKind::Bang => token::Not,
            rustc_lexer::TokenKind::Lt => token::Lt,
            rustc_lexer::TokenKind::Gt => token::Gt,
            rustc_lexer::TokenKind::Minus => token::BinOp(token::Minus),
            rustc_lexer::TokenKind::And => token::BinOp(token::And),
            rustc_lexer::TokenKind::Or => token::BinOp(token::Or),
            rustc_lexer::TokenKind::Plus => token::BinOp(token::Plus),
            rustc_lexer::TokenKind::Star => token::BinOp(token::Star),
            rustc_lexer::TokenKind::Slash => token::BinOp(token::Slash),
            rustc_lexer::TokenKind::Caret => token::BinOp(token::Caret),
            rustc_lexer::TokenKind::Percent => token::BinOp(token::Percent),

            rustc_lexer::TokenKind::Unknown => {
                let c = self.str_from(start).chars().next().unwrap();
                let mut err =
                    self.struct_fatal_span_char(start, self.pos, "unknown start of token", c);
                // FIXME: the lexer could be used to turn the ASCII version of unicode
                // homoglyphs into the token, instead of keeping a table in
                // `check_for_substitution`. Ideally, this should be inside `rustc_lexer`.
                // However, we should first remove compound tokens like `<<` from
                // `rustc_lexer`, and then add fancier error recovery to it, as there
                // will be less overall work to do this way.
                let token = unicode_chars::check_for_substitution(self, start, c, &mut err);
                if c == '\x00' {
                    err.help("source files must contain UTF-8 encoded text, unexpected null bytes might occur when a different encoding is used");
                }
                err.emit();
                // Return the recovered substitution token, if one was found.
                token?
            }
        })
    }
279 
cook_doc_comment( &self, content_start: BytePos, content: &str, comment_kind: CommentKind, doc_style: DocStyle, ) -> TokenKind280     fn cook_doc_comment(
281         &self,
282         content_start: BytePos,
283         content: &str,
284         comment_kind: CommentKind,
285         doc_style: DocStyle,
286     ) -> TokenKind {
287         if content.contains('\r') {
288             for (idx, _) in content.char_indices().filter(|&(_, c)| c == '\r') {
289                 self.err_span_(
290                     content_start + BytePos(idx as u32),
291                     content_start + BytePos(idx as u32 + 1),
292                     match comment_kind {
293                         CommentKind::Line => "bare CR not allowed in doc-comment",
294                         CommentKind::Block => "bare CR not allowed in block doc-comment",
295                     },
296                 );
297             }
298         }
299 
300         let attr_style = match doc_style {
301             DocStyle::Outer => AttrStyle::Outer,
302             DocStyle::Inner => AttrStyle::Inner,
303         };
304 
305         token::DocComment(comment_kind, attr_style, Symbol::intern(content))
306     }
307 
    /// Converts a `rustc_lexer` literal into a `rustc_ast` literal kind plus
    /// its interned content (without delimiters or suffix), validating
    /// termination, escapes, and digits along the way.
    ///
    /// `suffix_start` is the absolute position where the literal's suffix
    /// (if any) begins, i.e. just past the closing delimiter.
    fn cook_lexer_literal(
        &self,
        start: BytePos,
        suffix_start: BytePos,
        kind: rustc_lexer::LiteralKind,
    ) -> (token::LitKind, Symbol) {
        // prefix means `"` or `br"` or `r###"`, ...
        // For each quoted kind: (ast kind, unescape mode, prefix len, postfix len).
        // Unterminated quoted literals are fatal errors.
        let (lit_kind, mode, prefix_len, postfix_len) = match kind {
            rustc_lexer::LiteralKind::Char { terminated } => {
                if !terminated {
                    self.sess
                        .span_diagnostic
                        .struct_span_fatal_with_code(
                            self.mk_sp(start, suffix_start),
                            "unterminated character literal",
                            error_code!(E0762),
                        )
                        .emit();
                    FatalError.raise();
                }
                (token::Char, Mode::Char, 1, 1) // ' '
            }
            rustc_lexer::LiteralKind::Byte { terminated } => {
                if !terminated {
                    self.sess
                        .span_diagnostic
                        .struct_span_fatal_with_code(
                            self.mk_sp(start + BytePos(1), suffix_start),
                            "unterminated byte constant",
                            error_code!(E0763),
                        )
                        .emit();
                    FatalError.raise();
                }
                (token::Byte, Mode::Byte, 2, 1) // b' '
            }
            rustc_lexer::LiteralKind::Str { terminated } => {
                if !terminated {
                    self.sess
                        .span_diagnostic
                        .struct_span_fatal_with_code(
                            self.mk_sp(start, suffix_start),
                            "unterminated double quote string",
                            error_code!(E0765),
                        )
                        .emit();
                    FatalError.raise();
                }
                (token::Str, Mode::Str, 1, 1) // " "
            }
            rustc_lexer::LiteralKind::ByteStr { terminated } => {
                if !terminated {
                    self.sess
                        .span_diagnostic
                        .struct_span_fatal_with_code(
                            self.mk_sp(start + BytePos(1), suffix_start),
                            "unterminated double quote byte string",
                            error_code!(E0766),
                        )
                        .emit();
                    FatalError.raise();
                }
                (token::ByteStr, Mode::ByteStr, 2, 1) // b" "
            }
            // Raw-string problems (bad starter, no terminator, too many `#`)
            // are reported — fatally — by `report_raw_str_error`.
            rustc_lexer::LiteralKind::RawStr { n_hashes, err } => {
                self.report_raw_str_error(start, err);
                let n = u32::from(n_hashes);
                (token::StrRaw(n_hashes), Mode::RawStr, 2 + n, 1 + n) // r##" "##
            }
            rustc_lexer::LiteralKind::RawByteStr { n_hashes, err } => {
                self.report_raw_str_error(start, err);
                let n = u32::from(n_hashes);
                (token::ByteStrRaw(n_hashes), Mode::RawByteStr, 3 + n, 1 + n) // br##" "##
            }
            // Numeric literals have no delimiters, so they return directly
            // instead of falling through to the delimiter-stripping below.
            rustc_lexer::LiteralKind::Int { base, empty_int } => {
                return if empty_int {
                    self.sess
                        .span_diagnostic
                        .struct_span_err_with_code(
                            self.mk_sp(start, suffix_start),
                            "no valid digits found for number",
                            error_code!(E0768),
                        )
                        .emit();
                    // Recover with a dummy `0` so parsing can continue.
                    (token::Integer, sym::integer(0))
                } else {
                    self.validate_int_literal(base, start, suffix_start);
                    (token::Integer, self.symbol_from_to(start, suffix_start))
                };
            }
            rustc_lexer::LiteralKind::Float { base, empty_exponent } => {
                if empty_exponent {
                    self.err_span_(start, self.pos, "expected at least one digit in exponent");
                }

                // Only decimal floats are supported; other bases are errors.
                match base {
                    Base::Hexadecimal => self.err_span_(
                        start,
                        suffix_start,
                        "hexadecimal float literal is not supported",
                    ),
                    Base::Octal => {
                        self.err_span_(start, suffix_start, "octal float literal is not supported")
                    }
                    Base::Binary => {
                        self.err_span_(start, suffix_start, "binary float literal is not supported")
                    }
                    _ => (),
                }

                let id = self.symbol_from_to(start, suffix_start);
                return (token::Float, id);
            }
        };
        // Strip the delimiters: `prefix_len` bytes at the front (quote plus
        // any `b`/`r`/`#`s) and `postfix_len` bytes at the back.
        let content_start = start + BytePos(prefix_len);
        let content_end = suffix_start - BytePos(postfix_len);
        let id = self.symbol_from_to(content_start, content_end);
        self.validate_literal_escape(mode, content_start, content_end, prefix_len, postfix_len);
        (lit_kind, id)
    }
428 
429     #[inline]
src_index(&self, pos: BytePos) -> usize430     fn src_index(&self, pos: BytePos) -> usize {
431         (pos - self.start_pos).to_usize()
432     }
433 
434     /// Slice of the source text from `start` up to but excluding `self.pos`,
435     /// meaning the slice does not include the character `self.ch`.
str_from(&self, start: BytePos) -> &str436     fn str_from(&self, start: BytePos) -> &str {
437         self.str_from_to(start, self.pos)
438     }
439 
440     /// As symbol_from, with an explicit endpoint.
symbol_from_to(&self, start: BytePos, end: BytePos) -> Symbol441     fn symbol_from_to(&self, start: BytePos, end: BytePos) -> Symbol {
442         debug!("taking an ident from {:?} to {:?}", start, end);
443         Symbol::intern(self.str_from_to(start, end))
444     }
445 
446     /// Slice of the source text spanning from `start` up to but excluding `end`.
str_from_to(&self, start: BytePos, end: BytePos) -> &str447     fn str_from_to(&self, start: BytePos, end: BytePos) -> &str {
448         &self.src[self.src_index(start)..self.src_index(end)]
449     }
450 
report_raw_str_error(&self, start: BytePos, opt_err: Option<RawStrError>)451     fn report_raw_str_error(&self, start: BytePos, opt_err: Option<RawStrError>) {
452         match opt_err {
453             Some(RawStrError::InvalidStarter { bad_char }) => {
454                 self.report_non_started_raw_string(start, bad_char)
455             }
456             Some(RawStrError::NoTerminator { expected, found, possible_terminator_offset }) => self
457                 .report_unterminated_raw_string(start, expected, possible_terminator_offset, found),
458             Some(RawStrError::TooManyDelimiters { found }) => {
459                 self.report_too_many_hashes(start, found)
460             }
461             None => (),
462         }
463     }
464 
report_non_started_raw_string(&self, start: BytePos, bad_char: char) -> !465     fn report_non_started_raw_string(&self, start: BytePos, bad_char: char) -> ! {
466         self.struct_fatal_span_char(
467             start,
468             self.pos,
469             "found invalid character; only `#` is allowed in raw string delimitation",
470             bad_char,
471         )
472         .emit();
473         FatalError.raise()
474     }
475 
report_unterminated_raw_string( &self, start: BytePos, n_hashes: usize, possible_offset: Option<usize>, found_terminators: usize, ) -> !476     fn report_unterminated_raw_string(
477         &self,
478         start: BytePos,
479         n_hashes: usize,
480         possible_offset: Option<usize>,
481         found_terminators: usize,
482     ) -> ! {
483         let mut err = self.sess.span_diagnostic.struct_span_fatal_with_code(
484             self.mk_sp(start, start),
485             "unterminated raw string",
486             error_code!(E0748),
487         );
488 
489         err.span_label(self.mk_sp(start, start), "unterminated raw string");
490 
491         if n_hashes > 0 {
492             err.note(&format!(
493                 "this raw string should be terminated with `\"{}`",
494                 "#".repeat(n_hashes)
495             ));
496         }
497 
498         if let Some(possible_offset) = possible_offset {
499             let lo = start + BytePos(possible_offset as u32);
500             let hi = lo + BytePos(found_terminators as u32);
501             let span = self.mk_sp(lo, hi);
502             err.span_suggestion(
503                 span,
504                 "consider terminating the string here",
505                 "#".repeat(n_hashes),
506                 Applicability::MaybeIncorrect,
507             );
508         }
509 
510         err.emit();
511         FatalError.raise()
512     }
513 
514     /// Note: It was decided to not add a test case, because it would be too big.
515     /// <https://github.com/rust-lang/rust/pull/50296#issuecomment-392135180>
report_too_many_hashes(&self, start: BytePos, found: usize) -> !516     fn report_too_many_hashes(&self, start: BytePos, found: usize) -> ! {
517         self.fatal_span_(
518             start,
519             self.pos,
520             &format!(
521                 "too many `#` symbols: raw strings may be delimited \
522                 by up to 65535 `#` symbols, but found {}",
523                 found
524             ),
525         )
526         .raise();
527     }
528 
    /// Runs the unescaping machinery over a literal's content purely to
    /// surface escape errors now; the content itself is kept raw and the
    /// actual unescaping happens later.
    ///
    /// `prefix_len`/`postfix_len` are only used to reconstruct the span
    /// of the full literal, including quotes and any prefix.
    fn validate_literal_escape(
        &self,
        mode: Mode,
        content_start: BytePos,
        content_end: BytePos,
        prefix_len: u32,
        postfix_len: u32,
    ) {
        let lit_content = self.str_from_to(content_start, content_end);
        unescape::unescape_literal(lit_content, mode, &mut |range, result| {
            // Here we only check for errors. The actual unescaping is done later.
            if let Err(err) = result {
                // Span of the whole literal, delimiters included.
                let span_with_quotes = self
                    .mk_sp(content_start - BytePos(prefix_len), content_end + BytePos(postfix_len));
                // `range` is in byte offsets relative to the content; shift
                // it to an absolute span for the offending escape sequence.
                let (start, end) = (range.start as u32, range.end as u32);
                let lo = content_start + BytePos(start);
                let hi = lo + BytePos(end - start);
                let span = self.mk_sp(lo, hi);
                emit_unescape_error(
                    &self.sess.span_diagnostic,
                    lit_content,
                    span_with_quotes,
                    span,
                    mode,
                    range,
                    err,
                );
            }
        });
    }
559 
validate_int_literal(&self, base: Base, content_start: BytePos, content_end: BytePos)560     fn validate_int_literal(&self, base: Base, content_start: BytePos, content_end: BytePos) {
561         let base = match base {
562             Base::Binary => 2,
563             Base::Octal => 8,
564             _ => return,
565         };
566         let s = self.str_from_to(content_start + BytePos(2), content_end);
567         for (idx, c) in s.char_indices() {
568             let idx = idx as u32;
569             if c != '_' && c.to_digit(base).is_none() {
570                 let lo = content_start + BytePos(2 + idx);
571                 let hi = content_start + BytePos(2 + idx + c.len_utf8() as u32);
572                 self.err_span_(lo, hi, &format!("invalid digit for a base {} literal", base));
573             }
574         }
575     }
576 }
577 
nfc_normalize(string: &str) -> Symbol578 pub fn nfc_normalize(string: &str) -> Symbol {
579     use unicode_normalization::{is_nfc_quick, IsNormalized, UnicodeNormalization};
580     match is_nfc_quick(string.chars()) {
581         IsNormalized::Yes => Symbol::intern(string),
582         _ => {
583             let normalized_str: String = string.chars().nfc().collect();
584             Symbol::intern(&normalized_str)
585         }
586     }
587 }
588