1 //===- Lexer.cpp - C Language Family Lexer --------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //  This file implements the Lexer and Token interfaces.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/Lex/Lexer.h"
14 #include "UnicodeCharSets.h"
15 #include "clang/Basic/CharInfo.h"
16 #include "clang/Basic/Diagnostic.h"
17 #include "clang/Basic/IdentifierTable.h"
18 #include "clang/Basic/LLVM.h"
19 #include "clang/Basic/LangOptions.h"
20 #include "clang/Basic/SourceLocation.h"
21 #include "clang/Basic/SourceManager.h"
22 #include "clang/Basic/TokenKinds.h"
23 #include "clang/Lex/LexDiagnostic.h"
24 #include "clang/Lex/LiteralSupport.h"
25 #include "clang/Lex/MultipleIncludeOpt.h"
26 #include "clang/Lex/Preprocessor.h"
27 #include "clang/Lex/PreprocessorOptions.h"
28 #include "clang/Lex/Token.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/Optional.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/StringExtras.h"
33 #include "llvm/ADT/StringRef.h"
34 #include "llvm/ADT/StringSwitch.h"
35 #include "llvm/Support/Compiler.h"
36 #include "llvm/Support/ConvertUTF.h"
37 #include "llvm/Support/MathExtras.h"
38 #include "llvm/Support/MemoryBufferRef.h"
39 #include "llvm/Support/NativeFormatting.h"
40 #include "llvm/Support/Unicode.h"
41 #include "llvm/Support/UnicodeCharRanges.h"
42 #include <algorithm>
43 #include <cassert>
44 #include <cstddef>
45 #include <cstdint>
46 #include <cstring>
47 #include <string>
48 #include <tuple>
49 #include <utility>
50 
51 using namespace clang;
52 
53 //===----------------------------------------------------------------------===//
54 // Token Class Implementation
55 //===----------------------------------------------------------------------===//
56 
57 /// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
58 bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
59   if (isAnnotation())
60     return false;
61   if (IdentifierInfo *II = getIdentifierInfo())
62     return II->getObjCKeywordID() == objcKey;
63   return false;
64 }
65 
66 /// getObjCKeywordID - Return the ObjC keyword kind.
67 tok::ObjCKeywordKind Token::getObjCKeywordID() const {
68   if (isAnnotation())
69     return tok::objc_not_keyword;
70   IdentifierInfo *specId = getIdentifierInfo();
71   return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
72 }
73 
74 //===----------------------------------------------------------------------===//
75 // Lexer Class Implementation
76 //===----------------------------------------------------------------------===//
77 
// Out-of-line definition used to anchor Lexer's vtable/type info to this
// translation unit (standard LLVM "anchor" idiom).
void Lexer::anchor() {}
79 
/// InitLexer - Initialization shared by all of the constructors: record the
/// buffer bounds being lexed, skip a UTF-8 BOM when starting at the very
/// beginning of the buffer, and reset all per-file lexing state to its
/// defaults.  BufPtr is where lexing starts; it may be past BufStart.
void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
                      const char *BufEnd) {
  BufferStart = BufStart;
  BufferPtr = BufPtr;
  BufferEnd = BufEnd;

  assert(BufEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Check whether we have a BOM in the beginning of the buffer. If yes - act
  // accordingly. Right now we support only UTF-8 with and without BOM, so, just
  // skip the UTF-8 BOM if it's present.
  if (BufferStart == BufferPtr) {
    // Determine the size of the BOM.
    StringRef Buf(BufferStart, BufferEnd - BufferStart);
    size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
      .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
      .Default(0);

    // Skip the BOM.
    BufferPtr += BOMLength;
  }

  Is_PragmaLexer = false;
  CurrentConflictMarkerState = CMK_None;

  // Start of the file is a start of line.
  IsAtStartOfLine = true;
  IsAtPhysicalStartOfLine = true;

  HasLeadingSpace = false;
  HasLeadingEmptyMacro = false;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode.  Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to not keeping comments.
  ExtendedTokenMode = 0;

  // No newline has been observed yet.
  NewLinePtr = nullptr;
}
131 
/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process.  This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
             Preprocessor &PP, bool IsFirstIncludeOfFile)
    : PreprocessorLexer(&PP, FID),
      FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
      LangOpts(PP.getLangOpts()), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  // Lex from the start of the whole buffer.
  InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
            InputFile.getBufferEnd());

  // Pick up comment/whitespace retention settings from the preprocessor.
  resetExtendedTokenMode();
}
147 
/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexFromRawLexer'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.  No
/// Preprocessor is attached, so this lexer operates in raw mode.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
             const char *BufStart, const char *BufPtr, const char *BufEnd,
             bool IsFirstIncludeOfFile)
    : FileLoc(fileloc), LangOpts(langOpts), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.
  LexingRawMode = true;
}
161 
/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexFromRawLexer'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
             const SourceManager &SM, const LangOptions &langOpts,
             bool IsFirstIncludeOfFile)
    // Delegate to the raw-lexer constructor above, lexing the entire buffer.
    : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
            FromFile.getBufferStart(), FromFile.getBufferEnd(),
            IsFirstIncludeOfFile) {}
171 
172 void Lexer::resetExtendedTokenMode() {
173   assert(PP && "Cannot reset token mode without a preprocessor");
174   if (LangOpts.TraditionalCPP)
175     SetKeepWhitespaceMode(true);
176   else
177     SetCommentRetentionState(PP->getCommentRetentionState());
178 }
179 
/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion.  This has a variety of magic semantics that this method
/// sets up.  It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// \param SpellingLoc location whose file buffer holds the bytes to lex.
/// \param ExpansionLocStart,ExpansionLocEnd expansion range that lexed
/// tokens should appear to come from.
/// \param TokLen number of bytes to lex starting at \p SpellingLoc.
/// \param PP preprocessor that manages the new lexer.
///
/// TODO: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by.  This would require making
/// Preprocessor::Lex virtual.  Given that, we could just dump in a magic lexer
/// interface that could handle this stuff.  This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation ExpansionLocStart,
                                 SourceLocation ExpansionLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  llvm::MemoryBufferRef InputFile = SM.getBufferOrFake(SpellingFID);
  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want.  This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information.  This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
                                     ExpansionLocStart,
                                     ExpansionLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOD token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}
229 
230 void Lexer::seek(unsigned Offset, bool IsAtStartOfLine) {
231   this->IsAtPhysicalStartOfLine = IsAtStartOfLine;
232   this->IsAtStartOfLine = IsAtStartOfLine;
233   assert((BufferStart + Offset) <= BufferEnd);
234   BufferPtr = BufferStart + Offset;
235 }
236 
/// Escape a string in place so it can be embedded in a quoted literal:
/// backslashes and the quote character get a '\' prefix, and each physical
/// newline (or a "\r\n" / "\n\r" pair) becomes the two characters '\' 'n'.
template <typename T> static void StringifyImpl(T &Str, char Quote) {
  typename T::size_type Idx = 0;
  while (Idx < Str.size()) {
    const char C = Str[Idx];
    if (C == '\\' || C == Quote) {
      // Prefix with a backslash, then step over both characters.
      Str.insert(Str.begin() + Idx, '\\');
      Idx += 2;
    } else if (C == '\n' || C == '\r') {
      const bool IsMixedPair = Idx + 1 < Str.size() &&
                               (Str[Idx + 1] == '\n' || Str[Idx + 1] == '\r') &&
                               Str[Idx + 1] != C;
      if (IsMixedPair) {
        // A "\r\n" or "\n\r" pair is rewritten in place as '\' 'n'.
        Str[Idx] = '\\';
        Str[Idx + 1] = 'n';
      } else {
        // A lone newline becomes '\' 'n', growing the string by one.
        Str[Idx] = '\\';
        Str.insert(Str.begin() + Idx + 1, 'n');
      }
      Idx += 2;
    } else {
      ++Idx;
    }
  }
}
261 
262 std::string Lexer::Stringify(StringRef Str, bool Charify) {
263   std::string Result = std::string(Str);
264   char Quote = Charify ? '\'' : '"';
265   StringifyImpl(Result, Quote);
266   return Result;
267 }
268 
// In-place variant: escape Str for embedding in a double-quoted literal.
void Lexer::Stringify(SmallVectorImpl<char> &Str) { StringifyImpl(Str, '"'); }
270 
271 //===----------------------------------------------------------------------===//
272 // Token Spelling
273 //===----------------------------------------------------------------------===//
274 
/// Slow case of getSpelling. Extract the characters comprising the
/// spelling of this token from the provided input buffer.
///
/// \param Tok the token to spell; must have the needs-cleaning flag set.
/// \param BufPtr the token's first byte in the source buffer.
/// \param Spelling caller-provided output buffer; per the callers' contract
/// it holds at least Tok.getLength() bytes.
/// \returns the number of bytes written to \p Spelling.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
                              const LangOptions &LangOpts, char *Spelling) {
  assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");

  size_t Length = 0;
  const char *BufEnd = BufPtr + Tok.getLength();

  if (tok::isStringLiteral(Tok.getKind())) {
    // Munch the encoding-prefix and opening double-quote.
    while (BufPtr < BufEnd) {
      unsigned Size;
      // getCharAndSizeNoWarn folds trigraphs/escaped newlines into one char.
      Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
      BufPtr += Size;

      if (Spelling[Length - 1] == '"')
        break;
    }

    // Raw string literals need special handling; trigraph expansion and line
    // splicing do not occur within their d-char-sequence nor within their
    // r-char-sequence.
    if (Length >= 2 &&
        Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
      // Search backwards from the end of the token to find the matching closing
      // quote.
      const char *RawEnd = BufEnd;
      do --RawEnd; while (*RawEnd != '"');
      size_t RawLength = RawEnd - BufPtr + 1;

      // Everything between the quotes is included verbatim in the spelling.
      memcpy(Spelling + Length, BufPtr, RawLength);
      Length += RawLength;
      BufPtr += RawLength;

      // The rest of the token is lexed normally.
    }
  }

  // Copy the remaining bytes, again folding each multi-byte cleaned
  // character down to a single spelling character.
  while (BufPtr < BufEnd) {
    unsigned Size;
    Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
    BufPtr += Size;
  }

  // Cleaning only ever shrinks the spelling, so a token that needed cleaning
  // must come out strictly shorter than its source extent.
  assert(Length < Tok.getLength() &&
         "NeedsCleaning flag set on token that didn't need cleaning!");
  return Length;
}
325 
/// getSpelling() - Return the 'spelling' of this token.  The spelling of a
/// token are the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.  In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
///
/// \param loc location of the token to spell; it is relexed from there.
/// \param buffer scratch storage used only when the token needs cleaning.
/// \param invalid if non-null, set to true when the file buffer can't be
/// loaded.
/// \returns the spelling, pointing either into the source buffer (common
/// case) or into \p buffer (cleaning case); empty on failure.
StringRef Lexer::getSpelling(SourceLocation loc,
                             SmallVectorImpl<char> &buffer,
                             const SourceManager &SM,
                             const LangOptions &options,
                             bool *invalid) {
  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to the load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (invalid) *invalid = true;
    return {};
  }

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
              file.begin(), tokenBegin, file.end());
  Token token;
  lexer.LexFromRawLexer(token);

  unsigned length = token.getLength();

  // Common case:  no need for cleaning.
  if (!token.needsCleaning())
    return StringRef(tokenBegin, length);

  // Hard case, we need to relex the characters into the string.
  // First resize reserves worst-case space; second shrinks to what
  // getSpellingSlow actually produced.
  buffer.resize(length);
  buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
  return StringRef(buffer.data(), buffer.size());
}
366 
367 /// getSpelling() - Return the 'spelling' of this token.  The spelling of a
368 /// token are the characters used to represent the token in the source file
369 /// after trigraph expansion and escaped-newline folding.  In particular, this
370 /// wants to get the true, uncanonicalized, spelling of things like digraphs
371 /// UCNs, etc.
372 std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
373                                const LangOptions &LangOpts, bool *Invalid) {
374   assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
375 
376   bool CharDataInvalid = false;
377   const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
378                                                     &CharDataInvalid);
379   if (Invalid)
380     *Invalid = CharDataInvalid;
381   if (CharDataInvalid)
382     return {};
383 
384   // If this token contains nothing interesting, return it directly.
385   if (!Tok.needsCleaning())
386     return std::string(TokStart, TokStart + Tok.getLength());
387 
388   std::string Result;
389   Result.resize(Tok.getLength());
390   Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
391   return Result;
392 }
393 
/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string.  The caller is required
/// to allocate enough space for the token, which is guaranteed to be at least
/// Tok.getLength() bytes long.  The actual length of the token is returned.
///
/// Note that this method may do two possible things: it may either fill in
/// the buffer specified with characters, or it may *change the input pointer*
/// to point to a constant buffer with the data already in it (avoiding a
/// copy).  The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
                            const SourceManager &SourceMgr,
                            const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  const char *TokStart = nullptr;
  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
  if (Tok.is(tok::raw_identifier))
    TokStart = Tok.getRawIdentifier().data();
  else if (!Tok.hasUCN()) {
    if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
      // Just return the string from the identifier table, which is very quick.
      Buffer = II->getNameStart();
      return II->getLength();
    }
  }

  // NOTE: this can be checked even after testing for an IdentifierInfo.
  if (Tok.isLiteral())
    TokStart = Tok.getLiteralData();

  if (!TokStart) {
    // Compute the start of the token in the input lexer buffer.
    bool CharDataInvalid = false;
    TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
    if (Invalid)
      *Invalid = CharDataInvalid;
    if (CharDataInvalid) {
      // Report failure with an empty (but valid) buffer and zero length.
      Buffer = "";
      return 0;
    }
  }

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    // Point the caller at the internal buffer rather than copying.
    Buffer = TokStart;
    return Tok.getLength();
  }

  // Otherwise, hard case, relex the characters into the string.  Here Buffer
  // is the caller's preallocated writable storage (see contract above).
  return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}
446 
447 /// MeasureTokenLength - Relex the token at the specified location and return
448 /// its length in bytes in the input file.  If the token needs cleaning (e.g.
449 /// includes a trigraph or an escaped newline) then this count includes bytes
450 /// that are part of that.
451 unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
452                                    const SourceManager &SM,
453                                    const LangOptions &LangOpts) {
454   Token TheTok;
455   if (getRawToken(Loc, TheTok, SM, LangOpts))
456     return 0;
457   return TheTok.getLength();
458 }
459 
/// Relex the token at the specified location.
/// \param Loc start of the token; macro locations are first mapped to their
/// expansion location.
/// \param Result receives the relexed token.
/// \param IgnoreWhiteSpace when false (the default), starting on a
/// whitespace character is treated as a failure.
/// \returns true if there was a failure, false on success.
bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
                        const SourceManager &SM,
                        const LangOptions &LangOpts,
                        bool IgnoreWhiteSpace) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
  // all obviously single-char tokens.  This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getExpansionLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return true;

  const char *StrData = Buffer.data()+LocInfo.second;

  if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
    return true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
                 Buffer.begin(), StrData, Buffer.end());
  TheLexer.SetCommentRetentionState(true);
  TheLexer.LexFromRawLexer(Result);
  return false;
}
493 
494 /// Returns the pointer that points to the beginning of line that contains
495 /// the given offset, or null if the offset if invalid.
496 static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
497   const char *BufStart = Buffer.data();
498   if (Offset >= Buffer.size())
499     return nullptr;
500 
501   const char *LexStart = BufStart + Offset;
502   for (; LexStart != BufStart; --LexStart) {
503     if (isVerticalWhitespace(LexStart[0]) &&
504         !Lexer::isNewLineEscaped(BufStart, LexStart)) {
505       // LexStart should point at first character of logical line.
506       ++LexStart;
507       break;
508     }
509   }
510   return LexStart;
511 }
512 
/// Find the start of the token that contains \p Loc, for a file (non-macro)
/// location: back up to the beginning of the logical line and relex forward
/// until a token covering \p Loc is found.  Returns \p Loc itself on any
/// failure or when \p Loc lands in whitespace or at a token start.
static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  assert(Loc.isFileID());
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return Loc;

  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return Loc;

  // Back up from the current location until we hit the beginning of a line
  // (or the buffer). We'll relex from that point.
  const char *StrData = Buffer.data() + LocInfo.second;
  const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
  if (!LexStart || LexStart == StrData)
    return Loc;

  // Create a lexer starting at the beginning of this token.
  SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
  Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  // Lex tokens until we find the token that contains the source location.
  Token TheTok;
  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (TheLexer.getBufferLocation() > StrData) {
      // Lexing this token has taken the lexer past the source location we're
      // looking for. If the current token encompasses our source location,
      // return the beginning of that token.
      if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
        return TheTok.getLocation();

      // We ended up skipping over the source location entirely, which means
      // that it points into whitespace. We're done here.
      break;
    }
  } while (TheTok.getKind() != tok::eof);

  // We've passed our source location; just return the original source location.
  return Loc;
}
560 
/// GetBeginningOfToken - Return the location of the start of the token that
/// contains \p Loc.  File locations are handled directly; for macro-argument
/// expansions the spelling location is rewound to its token start and the
/// resulting delta is applied back to the macro location.  Other macro
/// locations are returned unchanged.
SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isFileID())
    return getBeginningOfFileToken(Loc, SM, LangOpts);

  if (!SM.isMacroArgExpansion(Loc))
    return Loc;

  SourceLocation FileLoc = SM.getSpellingLoc(Loc);
  SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
  std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
  std::pair<FileID, unsigned> BeginFileLocInfo =
      SM.getDecomposedLoc(BeginFileLoc);
  // The rewound location must stay within the same file, at or before FileLoc.
  assert(FileLocInfo.first == BeginFileLocInfo.first &&
         FileLocInfo.second >= BeginFileLocInfo.second);
  return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}
579 
namespace {

/// Classification of a preprocessor directive encountered while computing a
/// preamble (see Lexer::ComputePreamble).
enum PreambleDirectiveKind {
  PDK_Skipped, // Directive may appear in a preamble; keep scanning.
  PDK_Unknown  // Unrecognized directive; the preamble ends before it.
};

} // namespace
588 
/// ComputePreamble - Scan \p Buffer and compute how many bytes at its start
/// consist only of comments and preprocessor directives (the "preamble"),
/// optionally bounded to the first \p MaxLines lines.
/// \returns the preamble size in bytes and whether the preamble ends at the
/// start of a line.
PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
                                      const LangOptions &LangOpts,
                                      unsigned MaxLines) {
  // Create a lexer starting at the beginning of the file. Note that we use a
  // "fake" file source location at offset 1 so that the lexer will track our
  // position within the file.
  const SourceLocation::UIntTy StartOffset = 1;
  SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
  Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  bool InPreprocessorDirective = false;
  Token TheTok;
  SourceLocation ActiveCommentLoc;

  // Translate the MaxLines limit into a byte offset by finding the
  // MaxLines'th newline; a value of 0 means "no limit" (also used when the
  // buffer has fewer than MaxLines lines).
  unsigned MaxLineOffset = 0;
  if (MaxLines) {
    const char *CurPtr = Buffer.begin();
    unsigned CurLine = 0;
    while (CurPtr != Buffer.end()) {
      char ch = *CurPtr++;
      if (ch == '\n') {
        ++CurLine;
        if (CurLine == MaxLines)
          break;
      }
    }
    if (CurPtr != Buffer.end())
      MaxLineOffset = CurPtr - Buffer.begin();
  }

  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (InPreprocessorDirective) {
      // If we've hit the end of the file, we're done.
      if (TheTok.getKind() == tok::eof) {
        break;
      }

      // If we haven't hit the end of the preprocessor directive, skip this
      // token.
      if (!TheTok.isAtStartOfLine())
        continue;

      // We've passed the end of the preprocessor directive, and will look
      // at this token again below.
      InPreprocessorDirective = false;
    }

    // Keep track of the # of lines in the preamble.
    if (TheTok.isAtStartOfLine()) {
      unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;

      // If we were asked to limit the number of lines in the preamble,
      // and we're about to exceed that limit, we're done.
      if (MaxLineOffset && TokOffset >= MaxLineOffset)
        break;
    }

    // Comments are okay; skip over them.
    if (TheTok.getKind() == tok::comment) {
      // Remember the first comment after the last directive, in case the
      // preamble should end before it (e.g. it documents a declaration).
      if (ActiveCommentLoc.isInvalid())
        ActiveCommentLoc = TheTok.getLocation();
      continue;
    }

    if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
      // This is the start of a preprocessor directive.
      Token HashTok = TheTok;
      InPreprocessorDirective = true;
      ActiveCommentLoc = SourceLocation();

      // Figure out which directive this is. Since we're lexing raw tokens,
      // we don't have an identifier table available. Instead, just look at
      // the raw identifier to recognize and categorize preprocessor directives.
      TheLexer.LexFromRawLexer(TheTok);
      if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
        StringRef Keyword = TheTok.getRawIdentifier();
        PreambleDirectiveKind PDK
          = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
              .Case("include", PDK_Skipped)
              .Case("__include_macros", PDK_Skipped)
              .Case("define", PDK_Skipped)
              .Case("undef", PDK_Skipped)
              .Case("line", PDK_Skipped)
              .Case("error", PDK_Skipped)
              .Case("pragma", PDK_Skipped)
              .Case("import", PDK_Skipped)
              .Case("include_next", PDK_Skipped)
              .Case("warning", PDK_Skipped)
              .Case("ident", PDK_Skipped)
              .Case("sccs", PDK_Skipped)
              .Case("assert", PDK_Skipped)
              .Case("unassert", PDK_Skipped)
              .Case("if", PDK_Skipped)
              .Case("ifdef", PDK_Skipped)
              .Case("ifndef", PDK_Skipped)
              .Case("elif", PDK_Skipped)
              .Case("elifdef", PDK_Skipped)
              .Case("elifndef", PDK_Skipped)
              .Case("else", PDK_Skipped)
              .Case("endif", PDK_Skipped)
              .Default(PDK_Unknown);

        switch (PDK) {
        case PDK_Skipped:
          continue;

        case PDK_Unknown:
          // We don't know what this directive is; stop at the '#'.
          break;
        }
      }

      // We only end up here if we didn't recognize the preprocessor
      // directive or it was one that can't occur in the preamble at this
      // point. Roll back the current token to the location of the '#'.
      TheTok = HashTok;
    }

    // We hit a token that we don't recognize as being in the
    // "preprocessing only" part of the file, so we're no longer in
    // the preamble.
    break;
  } while (true);

  SourceLocation End;
  if (ActiveCommentLoc.isValid())
    End = ActiveCommentLoc; // don't truncate a decl comment.
  else
    End = TheTok.getLocation();

  return PreambleBounds(End.getRawEncoding() - FileLoc.getRawEncoding(),
                        TheTok.isAtStartOfLine());
}
726 
/// getTokenPrefixLength - Return the physical byte offset, within the token
/// starting at \p TokStart, of the \p CharNo'th logical character.  Trigraphs
/// and escaped newlines can make one logical character span several physical
/// bytes, so the result may exceed \p CharNo.
unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts) {
  // Figure out how many physical characters away the specified expansion
  // character is.  This needs to take into consideration newlines and
  // trigraphs.
  bool Invalid = false;
  const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return 0;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting.  Skip
  // over the uninteresting characters.  If a token only consists of simple
  // chars, this method is extremely fast.
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return PhysOffset;
    ++TokPtr;
    --CharNo;
    ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.
  for (; CharNo; --CharNo) {
    unsigned Size;
    // Size receives the number of physical bytes this logical char occupies.
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token.  For example foo\<newline>bar
  // advanced by 3 should return the location of b, not of \\.  One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;

  return PhysOffset;
}
771 
772 /// Computes the source location just past the end of the
773 /// token at this source location.
774 ///
775 /// This routine can be used to produce a source location that
776 /// points just past the end of the token referenced by \p Loc, and
777 /// is generally used when a diagnostic needs to point just after a
778 /// token where it expected something different that it received. If
779 /// the returned source location would not be meaningful (e.g., if
780 /// it points into a macro), this routine returns an invalid
781 /// source location.
782 ///
783 /// \param Offset an offset from the end of the token, where the source
784 /// location should refer to. The default offset (0) produces a source
785 /// location pointing just past the end of the token; an offset of 1 produces
786 /// a source location pointing to the last character in the token, etc.
787 SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
788                                           const SourceManager &SM,
789                                           const LangOptions &LangOpts) {
790   if (Loc.isInvalid())
791     return {};
792 
793   if (Loc.isMacroID()) {
794     if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
795       return {}; // Points inside the macro expansion.
796   }
797 
798   unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
799   if (Len > Offset)
800     Len = Len - Offset;
801   else
802     return Loc;
803 
804   return Loc.getLocWithOffset(Len);
805 }
806 
807 /// Returns true if the given MacroID location points at the first
808 /// token of the macro expansion.
809 bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
810                                       const SourceManager &SM,
811                                       const LangOptions &LangOpts,
812                                       SourceLocation *MacroBegin) {
813   assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
814 
815   SourceLocation expansionLoc;
816   if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
817     return false;
818 
819   if (expansionLoc.isFileID()) {
820     // No other macro expansions, this is the first.
821     if (MacroBegin)
822       *MacroBegin = expansionLoc;
823     return true;
824   }
825 
826   return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
827 }
828 
829 /// Returns true if the given MacroID location points at the last
830 /// token of the macro expansion.
831 bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
832                                     const SourceManager &SM,
833                                     const LangOptions &LangOpts,
834                                     SourceLocation *MacroEnd) {
835   assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
836 
837   SourceLocation spellLoc = SM.getSpellingLoc(loc);
838   unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
839   if (tokLen == 0)
840     return false;
841 
842   SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
843   SourceLocation expansionLoc;
844   if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
845     return false;
846 
847   if (expansionLoc.isFileID()) {
848     // No other macro expansions.
849     if (MacroEnd)
850       *MacroEnd = expansionLoc;
851     return true;
852   }
853 
854   return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
855 }
856 
857 static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
858                                              const SourceManager &SM,
859                                              const LangOptions &LangOpts) {
860   SourceLocation Begin = Range.getBegin();
861   SourceLocation End = Range.getEnd();
862   assert(Begin.isFileID() && End.isFileID());
863   if (Range.isTokenRange()) {
864     End = Lexer::getLocForEndOfToken(End, 0, SM,LangOpts);
865     if (End.isInvalid())
866       return {};
867   }
868 
869   // Break down the source locations.
870   FileID FID;
871   unsigned BeginOffs;
872   std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
873   if (FID.isInvalid())
874     return {};
875 
876   unsigned EndOffs;
877   if (!SM.isInFileID(End, FID, &EndOffs) ||
878       BeginOffs > EndOffs)
879     return {};
880 
881   return CharSourceRange::getCharRange(Begin, End);
882 }
883 
884 // Assumes that `Loc` is in an expansion.
885 static bool isInExpansionTokenRange(const SourceLocation Loc,
886                                     const SourceManager &SM) {
887   return SM.getSLocEntry(SM.getFileID(Loc))
888       .getExpansion()
889       .isExpansionTokenRange();
890 }
891 
/// Accepts a range whose endpoints may be file or macro locations and tries
/// to rewrite it as a character range whose endpoints both lie in the same
/// FileID; returns an invalid range when no such mapping exists.
CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return {};

  // Trivial case: both endpoints are already file locations.
  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  // Mixed case: the begin is inside a macro.  It is only usable if it sits
  // at the very start of a macro expansion, in which case we can substitute
  // the expansion's file location.
  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return {};
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Mixed case: the end is inside a macro.  A token range needs the last
  // token of the expansion; a char range needs the expansion's start.
  if (Begin.isFileID() && End.isMacroID()) {
    if (Range.isTokenRange()) {
      if (!isAtEndOfMacroExpansion(End, SM, LangOpts, &End))
        return {};
      // Use the *original* end, not the expanded one in `End`.
      Range.setTokenRange(isInExpansionTokenRange(Range.getEnd(), SM));
    } else if (!isAtStartOfMacroExpansion(End, SM, LangOpts, &End))
      return {};
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Both endpoints are macro locations.  If they delimit one complete
  // expansion, map the whole range onto the expansion's file range.
  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    // Use the *original* `End`, not the expanded one in `MacroEnd`.
    if (Range.isTokenRange())
      Range.setTokenRange(isInExpansionTokenRange(End, SM));
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Last resort: if both endpoints are spelled inside arguments of the same
  // macro invocation, retry with the argument spelling locations.
  bool Invalid = false;
  const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
                                                        &Invalid);
  if (Invalid)
    return {};

  if (BeginEntry.getExpansion().isMacroArgExpansion()) {
    const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
                                                        &Invalid);
    if (Invalid)
      return {};

    if (EndEntry.getExpansion().isMacroArgExpansion() &&
        BeginEntry.getExpansion().getExpansionLocStart() ==
            EndEntry.getExpansion().getExpansionLocStart()) {
      Range.setBegin(SM.getImmediateSpellingLoc(Begin));
      Range.setEnd(SM.getImmediateSpellingLoc(End));
      // Recurse: the spelling locations may themselves be macro locations.
      return makeFileCharRange(Range, SM, LangOpts);
    }
  }

  return {};
}
960 
961 StringRef Lexer::getSourceText(CharSourceRange Range,
962                                const SourceManager &SM,
963                                const LangOptions &LangOpts,
964                                bool *Invalid) {
965   Range = makeFileCharRange(Range, SM, LangOpts);
966   if (Range.isInvalid()) {
967     if (Invalid) *Invalid = true;
968     return {};
969   }
970 
971   // Break down the source location.
972   std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
973   if (beginInfo.first.isInvalid()) {
974     if (Invalid) *Invalid = true;
975     return {};
976   }
977 
978   unsigned EndOffs;
979   if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
980       beginInfo.second > EndOffs) {
981     if (Invalid) *Invalid = true;
982     return {};
983   }
984 
985   // Try to the load the file buffer.
986   bool invalidTemp = false;
987   StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
988   if (invalidTemp) {
989     if (Invalid) *Invalid = true;
990     return {};
991   }
992 
993   if (Invalid) *Invalid = false;
994   return file.substr(beginInfo.second, EndOffs - beginInfo.second);
995 }
996 
/// Retrieve the name of the immediate macro expansion containing \p Loc: the
/// spelling of the macro name that was written in order to trigger the
/// innermost non-argument expansion this location belongs to.
StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");

  // Find the location of the immediate macro expansion.
  while (true) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition, move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If spelling location resides in the same FileID as macro expansion
    // location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
    Loc = SpellLoc;
  }

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(Loc);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
1043 
/// Like getImmediateMacroName, but first walks fully past macro-argument
/// expansions, and returns an empty StringRef when the spelling does not
/// reside in a real file (e.g. it was produced by token pasting or
/// stringization).
StringRef Lexer::getImmediateMacroNameForDiagnostics(
    SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");
  // Walk past macro argument expansions.
  while (SM.isMacroArgExpansion(Loc))
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();

  // If the macro's spelling has no FileID, then it's actually a token paste
  // or stringization (or similar) and not a macro at all.
  if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc))))
    return {};

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).getBegin());

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
1068 
1069 bool Lexer::isAsciiIdentifierContinueChar(char c, const LangOptions &LangOpts) {
1070   return isAsciiIdentifierContinue(c, LangOpts.DollarIdents);
1071 }
1072 
/// Returns true if the vertical-whitespace character at \p Str terminates a
/// line continuation, i.e. it is preceded (ignoring horizontal whitespace)
/// by a backslash.  \p BufferStart bounds the backwards scan.
bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) {
  assert(isVerticalWhitespace(Str[0]));
  // Nothing can precede the first character of the buffer.
  if (Str - 1 < BufferStart)
    return false;

  // Treat a two-character \r\n or \n\r line terminator as a unit and step
  // back over both halves.
  if ((Str[0] == '\n' && Str[-1] == '\r') ||
      (Str[0] == '\r' && Str[-1] == '\n')) {
    if (Str - 2 < BufferStart)
      return false;
    --Str;
  }
  --Str;

  // Rewind to first non-space character:
  while (Str > BufferStart && isHorizontalWhitespace(*Str))
    --Str;

  return *Str == '\\';
}
1092 
1093 StringRef Lexer::getIndentationForLine(SourceLocation Loc,
1094                                        const SourceManager &SM) {
1095   if (Loc.isInvalid() || Loc.isMacroID())
1096     return {};
1097   std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
1098   if (LocInfo.first.isInvalid())
1099     return {};
1100   bool Invalid = false;
1101   StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
1102   if (Invalid)
1103     return {};
1104   const char *Line = findBeginningOfLine(Buffer, LocInfo.second);
1105   if (!Line)
1106     return {};
1107   StringRef Rest = Buffer.substr(Line - Buffer.data());
1108   size_t NumWhitespaceChars = Rest.find_first_not_of(" \t");
1109   return NumWhitespaceChars == StringRef::npos
1110              ? ""
1111              : Rest.take_front(NumWhitespaceChars);
1112 }
1113 
1114 //===----------------------------------------------------------------------===//
1115 // Diagnostics forwarding code.
1116 //===----------------------------------------------------------------------===//
1117 
/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
/// lexer buffer was all expanded at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method.  Do not allow it to be inlined.
///
/// \param CharNo byte offset of the token within the spelling buffer.
/// \param TokLen length of the token, recorded in the new expansion SLoc.
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
    Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be a macro expansion");

  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the expansion location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  CharSourceRange II = SM.getImmediateExpansionRange(FileLoc);

  return SM.createExpansionLoc(SpellingLoc, II.getBegin(), II.getEnd(), TokLen);
}
1145 
1146 /// getSourceLocation - Return a source location identifier for the specified
1147 /// offset in the current file.
1148 SourceLocation Lexer::getSourceLocation(const char *Loc,
1149                                         unsigned TokLen) const {
1150   assert(Loc >= BufferStart && Loc <= BufferEnd &&
1151          "Location out of range for this buffer!");
1152 
1153   // In the normal case, we're just lexing from a simple file buffer, return
1154   // the file id from FileLoc with the offset specified.
1155   unsigned CharNo = Loc-BufferStart;
1156   if (FileLoc.isFileID())
1157     return FileLoc.getLocWithOffset(CharNo);
1158 
1159   // Otherwise, this is the _Pragma lexer case, which pretends that all of the
1160   // tokens are lexed from where the _Pragma was defined.
1161   assert(PP && "This doesn't work on raw lexers");
1162   return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
1163 }
1164 
/// Diag - Forwarding function for diagnostics.  This translate a source
/// position in the current buffer into a SourceLocation object for rendering.
/// NOTE: dereferences PP unconditionally, so it must only be used on
/// preprocessor-backed (non-raw) lexers.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}
1170 
1171 //===----------------------------------------------------------------------===//
1172 // Trigraph and Escaped Newline Handling Code.
1173 //===----------------------------------------------------------------------===//
1174 
1175 /// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
1176 /// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
1177 static char GetTrigraphCharForLetter(char Letter) {
1178   switch (Letter) {
1179   default:   return 0;
1180   case '=':  return '#';
1181   case ')':  return ']';
1182   case '(':  return '[';
1183   case '!':  return '|';
1184   case '\'': return '^';
1185   case '>':  return '}';
1186   case '/':  return '\\';
1187   case '<':  return '{';
1188   case '-':  return '~';
1189   }
1190 }
1191 
1192 /// DecodeTrigraphChar - If the specified character is a legal trigraph when
1193 /// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
1194 /// return the result character.  Finally, emit a warning about trigraph use
1195 /// whether trigraphs are enabled or not.
1196 static char DecodeTrigraphChar(const char *CP, Lexer *L, bool Trigraphs) {
1197   char Res = GetTrigraphCharForLetter(*CP);
1198   if (!Res || !L) return Res;
1199 
1200   if (!Trigraphs) {
1201     if (!L->isLexingRawMode())
1202       L->Diag(CP-2, diag::trigraph_ignored);
1203     return 0;
1204   }
1205 
1206   if (!L->isLexingRawMode())
1207     L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
1208   return Res;
1209 }
1210 
1211 /// getEscapedNewLineSize - Return the size of the specified escaped newline,
1212 /// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
1213 /// trigraph equivalent on entry to this function.
1214 unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
1215   unsigned Size = 0;
1216   while (isWhitespace(Ptr[Size])) {
1217     ++Size;
1218 
1219     if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
1220       continue;
1221 
1222     // If this is a \r\n or \n\r, skip the other half.
1223     if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
1224         Ptr[Size-1] != Ptr[Size])
1225       ++Size;
1226 
1227     return Size;
1228   }
1229 
1230   // Not an escaped newline, must be a \t or something else.
1231   return 0;
1232 }
1233 
1234 /// SkipEscapedNewLines - If P points to an escaped newline (or a series of
1235 /// them), skip over them and return the first non-escaped-newline found,
1236 /// otherwise return P.
1237 const char *Lexer::SkipEscapedNewLines(const char *P) {
1238   while (true) {
1239     const char *AfterEscape;
1240     if (*P == '\\') {
1241       AfterEscape = P+1;
1242     } else if (*P == '?') {
1243       // If not a trigraph for escape, bail out.
1244       if (P[1] != '?' || P[2] != '/')
1245         return P;
1246       // FIXME: Take LangOpts into account; the language might not
1247       // support trigraphs.
1248       AfterEscape = P+3;
1249     } else {
1250       return P;
1251     }
1252 
1253     unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
1254     if (NewLineSize == 0) return P;
1255     P = AfterEscape+NewLineSize;
1256   }
1257 }
1258 
1259 Optional<Token> Lexer::findNextToken(SourceLocation Loc,
1260                                      const SourceManager &SM,
1261                                      const LangOptions &LangOpts) {
1262   if (Loc.isMacroID()) {
1263     if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
1264       return None;
1265   }
1266   Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);
1267 
1268   // Break down the source location.
1269   std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
1270 
1271   // Try to load the file buffer.
1272   bool InvalidTemp = false;
1273   StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
1274   if (InvalidTemp)
1275     return None;
1276 
1277   const char *TokenBegin = File.data() + LocInfo.second;
1278 
1279   // Lex from the start of the given location.
1280   Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
1281                                       TokenBegin, File.end());
1282   // Find the token.
1283   Token Tok;
1284   lexer.LexFromRawLexer(Tok);
1285   return Tok;
1286 }
1287 
1288 /// Checks that the given token is the first token that occurs after the
1289 /// given location (this excludes comments and whitespace). Returns the location
1290 /// immediately after the specified token. If the token is not found or the
1291 /// location is inside a macro, the returned source location will be invalid.
1292 SourceLocation Lexer::findLocationAfterToken(
1293     SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
1294     const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
1295   Optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
1296   if (!Tok || Tok->isNot(TKind))
1297     return {};
1298   SourceLocation TokenLoc = Tok->getLocation();
1299 
1300   // Calculate how much whitespace needs to be skipped if any.
1301   unsigned NumWhitespaceChars = 0;
1302   if (SkipTrailingWhitespaceAndNewLine) {
1303     const char *TokenEnd = SM.getCharacterData(TokenLoc) + Tok->getLength();
1304     unsigned char C = *TokenEnd;
1305     while (isHorizontalWhitespace(C)) {
1306       C = *(++TokenEnd);
1307       NumWhitespaceChars++;
1308     }
1309 
1310     // Skip \r, \n, \r\n, or \n\r
1311     if (C == '\n' || C == '\r') {
1312       char PrevC = C;
1313       C = *(++TokenEnd);
1314       NumWhitespaceChars++;
1315       if ((C == '\n' || C == '\r') && C != PrevC)
1316         NumWhitespaceChars++;
1317     }
1318   }
1319 
1320   return TokenLoc.getLocWithOffset(Tok->getLength() + NumWhitespaceChars);
1321 }
1322 
/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
/// \param Ptr buffer position to read from (already advanced by Size bytes).
/// \param Size accumulator for the number of bytes consumed; it is added to,
///        not reset, so recursive calls build up the total width.
/// \param Tok token currently being formed, or null; when null, no
///        diagnostics are emitted and no NeedsCleaning flag is set.
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Also reached from below when a trigraph decodes to '\'.
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    // Passing a null lexer suppresses diagnostics when no token is in flight.
    if (char C = DecodeTrigraphChar(Ptr + 2, Tok ? this : nullptr,
                                    LangOpts.Trigraphs)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
1390 
/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
///
/// \param Ptr buffer position to read from (already advanced by Size bytes).
/// \param Size accumulator for the number of bytes consumed; it is added to,
///        not reset, so recursive calls build up the total width.
/// \param LangOpts controls whether trigraphs are recognized at all.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Also reached from below when a trigraph decodes to '\'.
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
1437 
1438 //===----------------------------------------------------------------------===//
1439 // Helper methods for lexing.
1440 //===----------------------------------------------------------------------===//
1441 
1442 /// Routine that indiscriminately sets the offset into the source file.
1443 void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
1444   BufferPtr = BufferStart + Offset;
1445   if (BufferPtr > BufferEnd)
1446     BufferPtr = BufferEnd;
1447   // FIXME: What exactly does the StartOfLine bit mean?  There are two
1448   // possible meanings for the "start" of the line: the first token on the
1449   // unexpanded line, or the first token on the expanded line.
1450   IsAtStartOfLine = StartOfLine;
1451   IsAtPhysicalStartOfLine = StartOfLine;
1452 }
1453 
1454 static bool isUnicodeWhitespace(uint32_t Codepoint) {
1455   static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
1456       UnicodeWhitespaceCharRanges);
1457   return UnicodeWhitespaceChars.contains(Codepoint);
1458 }
1459 
1460 static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
1461   if (LangOpts.AsmPreprocessor) {
1462     return false;
1463   } else if (LangOpts.DollarIdents && '$' == C) {
1464     return true;
1465   } else if (LangOpts.CPlusPlus) {
1466     // A non-leading codepoint must have the XID_Continue property.
1467     // XIDContinueRanges doesn't contains characters also in XIDStartRanges,
1468     // so we need to check both tables.
1469     // '_' doesn't have the XID_Continue property but is allowed in C++.
1470     static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
1471     static const llvm::sys::UnicodeCharSet XIDContinueChars(XIDContinueRanges);
1472     return C == '_' || XIDStartChars.contains(C) ||
1473            XIDContinueChars.contains(C);
1474   } else if (LangOpts.C11) {
1475     static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
1476         C11AllowedIDCharRanges);
1477     return C11AllowedIDChars.contains(C);
1478   } else {
1479     static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
1480         C99AllowedIDCharRanges);
1481     return C99AllowedIDChars.contains(C);
1482   }
1483 }
1484 
1485 static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
1486   if (LangOpts.AsmPreprocessor) {
1487     return false;
1488   }
1489   if (LangOpts.CPlusPlus) {
1490     static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
1491     // '_' doesn't have the XID_Start property but is allowed in C++.
1492     return C == '_' || XIDStartChars.contains(C);
1493   }
1494   if (!isAllowedIDChar(C, LangOpts))
1495     return false;
1496   if (LangOpts.C11) {
1497     static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
1498         C11DisallowedInitialIDCharRanges);
1499     return !C11DisallowedInitialIDChars.contains(C);
1500   }
1501   static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
1502       C99DisallowedInitialIDCharRanges);
1503   return !C99DisallowedInitialIDChars.contains(C);
1504 }
1505 
1506 static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
1507                                             const char *End) {
1508   return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
1509                                        L.getSourceLocation(End));
1510 }
1511 
1512 static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
1513                                       CharSourceRange Range, bool IsFirst) {
1514   // Check C99 compatibility.
1515   if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
1516     enum {
1517       CannotAppearInIdentifier = 0,
1518       CannotStartIdentifier
1519     };
1520 
1521     static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
1522         C99AllowedIDCharRanges);
1523     static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
1524         C99DisallowedInitialIDCharRanges);
1525     if (!C99AllowedIDChars.contains(C)) {
1526       Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
1527         << Range
1528         << CannotAppearInIdentifier;
1529     } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
1530       Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
1531         << Range
1532         << CannotStartIdentifier;
1533     }
1534   }
1535 }
1536 
/// After encountering UTF-8 character C and interpreting it as an identifier
/// character, check whether it's a homoglyph for a common non-identifier
/// source character that is unlikely to be an intentional identifier
/// character and warn if so.
static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
                                       CharSourceRange Range) {
  // FIXME: Handle Unicode quotation marks (smart quotes, fullwidth quotes).
  struct HomoglyphPair {
    uint32_t Character;
    char LooksLike;
    bool operator<(HomoglyphPair R) const { return Character < R.Character; }
  };
  // Table of codepoints that visually resemble ASCII punctuation (LooksLike
  // is the ASCII character) or are invisible (LooksLike == 0).  The table
  // must stay sorted by Character; it ends with a {0, 0} sentinel that is
  // excluded from the binary search below.
  static constexpr HomoglyphPair SortedHomoglyphs[] = {
    {U'\u00ad', 0},   // SOFT HYPHEN
    {U'\u01c3', '!'}, // LATIN LETTER RETROFLEX CLICK
    {U'\u037e', ';'}, // GREEK QUESTION MARK
    {U'\u200b', 0},   // ZERO WIDTH SPACE
    {U'\u200c', 0},   // ZERO WIDTH NON-JOINER
    {U'\u200d', 0},   // ZERO WIDTH JOINER
    {U'\u2060', 0},   // WORD JOINER
    {U'\u2061', 0},   // FUNCTION APPLICATION
    {U'\u2062', 0},   // INVISIBLE TIMES
    {U'\u2063', 0},   // INVISIBLE SEPARATOR
    {U'\u2064', 0},   // INVISIBLE PLUS
    {U'\u2212', '-'}, // MINUS SIGN
    {U'\u2215', '/'}, // DIVISION SLASH
    {U'\u2216', '\\'}, // SET MINUS
    {U'\u2217', '*'}, // ASTERISK OPERATOR
    {U'\u2223', '|'}, // DIVIDES
    {U'\u2227', '^'}, // LOGICAL AND
    {U'\u2236', ':'}, // RATIO
    {U'\u223c', '~'}, // TILDE OPERATOR
    {U'\ua789', ':'}, // MODIFIER LETTER COLON
    {U'\ufeff', 0},   // ZERO WIDTH NO-BREAK SPACE
    {U'\uff01', '!'}, // FULLWIDTH EXCLAMATION MARK
    {U'\uff03', '#'}, // FULLWIDTH NUMBER SIGN
    {U'\uff04', '$'}, // FULLWIDTH DOLLAR SIGN
    {U'\uff05', '%'}, // FULLWIDTH PERCENT SIGN
    {U'\uff06', '&'}, // FULLWIDTH AMPERSAND
    {U'\uff08', '('}, // FULLWIDTH LEFT PARENTHESIS
    {U'\uff09', ')'}, // FULLWIDTH RIGHT PARENTHESIS
    {U'\uff0a', '*'}, // FULLWIDTH ASTERISK
    {U'\uff0b', '+'}, // FULLWIDTH PLUS SIGN
    {U'\uff0c', ','}, // FULLWIDTH COMMA
    {U'\uff0d', '-'}, // FULLWIDTH HYPHEN-MINUS
    {U'\uff0e', '.'}, // FULLWIDTH FULL STOP
    {U'\uff0f', '/'}, // FULLWIDTH SOLIDUS
    {U'\uff1a', ':'}, // FULLWIDTH COLON
    {U'\uff1b', ';'}, // FULLWIDTH SEMICOLON
    {U'\uff1c', '<'}, // FULLWIDTH LESS-THAN SIGN
    {U'\uff1d', '='}, // FULLWIDTH EQUALS SIGN
    {U'\uff1e', '>'}, // FULLWIDTH GREATER-THAN SIGN
    {U'\uff1f', '?'}, // FULLWIDTH QUESTION MARK
    {U'\uff20', '@'}, // FULLWIDTH COMMERCIAL AT
    {U'\uff3b', '['}, // FULLWIDTH LEFT SQUARE BRACKET
    {U'\uff3c', '\\'}, // FULLWIDTH REVERSE SOLIDUS
    {U'\uff3d', ']'}, // FULLWIDTH RIGHT SQUARE BRACKET
    {U'\uff3e', '^'}, // FULLWIDTH CIRCUMFLEX ACCENT
    {U'\uff5b', '{'}, // FULLWIDTH LEFT CURLY BRACKET
    {U'\uff5c', '|'}, // FULLWIDTH VERTICAL LINE
    {U'\uff5d', '}'}, // FULLWIDTH RIGHT CURLY BRACKET
    {U'\uff5e', '~'}, // FULLWIDTH TILDE
    {0, 0}
  };
  // Binary search over the table (minus the sentinel).  lower_bound returns
  // the first entry not less than C; a hit is confirmed by the equality
  // check below.  If C is larger than every entry, Homoglyph points at the
  // sentinel, whose Character (0) can never equal a non-ASCII C.
  auto Homoglyph =
      std::lower_bound(std::begin(SortedHomoglyphs),
                       std::end(SortedHomoglyphs) - 1, HomoglyphPair{C, '\0'});
  if (Homoglyph->Character == C) {
    // Render the codepoint as a 4+ digit uppercase hex string for the
    // diagnostic text.
    llvm::SmallString<5> CharBuf;
    {
      llvm::raw_svector_ostream CharOS(CharBuf);
      llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
    }
    if (Homoglyph->LooksLike) {
      // Visible homoglyph: tell the user which ASCII character it resembles.
      const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
          << Range << CharBuf << LooksLikeStr;
    } else {
      // Invisible character: warn that it has zero width.
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
          << Range << CharBuf;
    }
  }
}
1620 
1621 static void diagnoseInvalidUnicodeCodepointInIdentifier(
1622     DiagnosticsEngine &Diags, const LangOptions &LangOpts, uint32_t CodePoint,
1623     CharSourceRange Range, bool IsFirst) {
1624   if (isASCII(CodePoint))
1625     return;
1626 
1627   bool IsIDStart = isAllowedInitiallyIDChar(CodePoint, LangOpts);
1628   bool IsIDContinue = IsIDStart || isAllowedIDChar(CodePoint, LangOpts);
1629 
1630   if ((IsFirst && IsIDStart) || (!IsFirst && IsIDContinue))
1631     return;
1632 
1633   bool InvalidOnlyAtStart = IsFirst && !IsIDStart && IsIDContinue;
1634 
1635   llvm::SmallString<5> CharBuf;
1636   llvm::raw_svector_ostream CharOS(CharBuf);
1637   llvm::write_hex(CharOS, CodePoint, llvm::HexPrintStyle::Upper, 4);
1638 
1639   if (!IsFirst || InvalidOnlyAtStart) {
1640     Diags.Report(Range.getBegin(), diag::err_character_not_allowed_identifier)
1641         << Range << CharBuf << int(InvalidOnlyAtStart)
1642         << FixItHint::CreateRemoval(Range);
1643   } else {
1644     Diags.Report(Range.getBegin(), diag::err_character_not_allowed)
1645         << Range << CharBuf << FixItHint::CreateRemoval(Range);
1646   }
1647 }
1648 
/// Try to consume a UCN (\uXXXX or \UXXXXXXXX) that begins at CurPtr + Size
/// as part of an identifier.  On success, advances CurPtr past the UCN,
/// marks the token as containing a UCN, and returns true; otherwise leaves
/// CurPtr unchanged and returns false.
bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
                                    Token &Result) {
  const char *UCNPtr = CurPtr + Size;
  // tryReadUCN advances UCNPtr past the UCN and returns 0 if it is not a
  // syntactically valid UCN.
  uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
  if (CodePoint == 0) {
    return false;
  }

  if (!isAllowedIDChar(CodePoint, LangOpts)) {
    // ASCII and whitespace codepoints never extend an identifier.
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;
    // Only diagnose when producing real tokens for the compiler proper (not
    // in raw mode, inside a directive, or when re-lexing preprocessed output).
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UCNPtr),
          /*IsFirst=*/false);

    // We got a unicode codepoint that is neither a space nor a
    // valid identifier part.
    // Carry on as if the codepoint was valid for recovery purposes.
  } else if (!isLexingRawMode())
    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UCNPtr),
                              /*IsFirst=*/false);

  Result.setFlag(Token::HasUCN);
  // If the UCN is spelled plainly (exactly 6 chars for \uXXXX, 10 for
  // \UXXXXXXXX) we can jump straight past it; otherwise it contains
  // trigraphs or escaped newlines and must be re-consumed character by
  // character so the token gets flagged for cleaning.
  if ((UCNPtr - CurPtr ==  6 && CurPtr[1] == 'u') ||
      (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
    CurPtr = UCNPtr;
  else
    while (CurPtr != UCNPtr)
      (void)getAndAdvanceChar(CurPtr, Result);
  return true;
}
1684 
/// Try to consume a UTF-8 encoded codepoint at CurPtr as part of an
/// identifier.  On success, advances CurPtr past the sequence and returns
/// true; returns false for invalid UTF-8, ASCII, and Unicode whitespace.
bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
  const char *UnicodePtr = CurPtr;
  llvm::UTF32 CodePoint;
  // Decode one strict UTF-8 sequence; UnicodePtr is advanced past it only
  // when the conversion succeeds.
  llvm::ConversionResult Result =
      llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
                                (const llvm::UTF8 *)BufferEnd,
                                &CodePoint,
                                llvm::strictConversion);
  if (Result != llvm::conversionOK)
    return false;

  if (!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts)) {
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;

    // Only diagnose when producing real tokens for the compiler proper (not
    // in raw mode, inside a directive, or when re-lexing preprocessed output).
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UnicodePtr), /*IsFirst=*/false);
    // We got a unicode codepoint that is neither a space nor a
    // valid identifier part. Carry on as if the codepoint was
    // valid for recovery purposes.
  } else if (!isLexingRawMode()) {
    // Valid identifier character: still warn about C99 incompatibility and
    // about codepoints that look deceptively like ASCII punctuation.
    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UnicodePtr),
                              /*IsFirst=*/false);
    maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
                               makeCharRange(*this, CurPtr, UnicodePtr));
  }

  CurPtr = UnicodePtr;
  return true;
}
1719 
1720 bool Lexer::LexUnicodeIdentifierStart(Token &Result, uint32_t C,
1721                                       const char *CurPtr) {
1722   if (isAllowedInitiallyIDChar(C, LangOpts)) {
1723     if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
1724         !PP->isPreprocessedOutput()) {
1725       maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
1726                                 makeCharRange(*this, BufferPtr, CurPtr),
1727                                 /*IsFirst=*/true);
1728       maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
1729                                  makeCharRange(*this, BufferPtr, CurPtr));
1730     }
1731 
1732     MIOpt.ReadToken();
1733     return LexIdentifierContinue(Result, CurPtr);
1734   }
1735 
1736   if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
1737       !PP->isPreprocessedOutput() && !isASCII(*BufferPtr) &&
1738       !isAllowedInitiallyIDChar(C, LangOpts) && !isUnicodeWhitespace(C)) {
1739     // Non-ASCII characters tend to creep into source code unintentionally.
1740     // Instead of letting the parser complain about the unknown token,
1741     // just drop the character.
1742     // Note that we can /only/ do this when the non-ASCII character is actually
1743     // spelled as Unicode, not written as a UCN. The standard requires that
1744     // we not throw away any possible preprocessor tokens, but there's a
1745     // loophole in the mapping of Unicode characters to basic character set
1746     // characters that allows us to map these particular characters to, say,
1747     // whitespace.
1748     diagnoseInvalidUnicodeCodepointInIdentifier(
1749         PP->getDiagnostics(), LangOpts, C,
1750         makeCharRange(*this, BufferPtr, CurPtr), /*IsStart*/ true);
1751     BufferPtr = CurPtr;
1752     return false;
1753   }
1754 
1755   // Otherwise, we have an explicit UCN or a character that's unlikely to show
1756   // up by accident.
1757   MIOpt.ReadToken();
1758   FormTokenWithChars(Result, CurPtr, tok::unknown);
1759   return true;
1760 }
1761 
/// Lex the remainder of an identifier after its start character has been
/// consumed.  Handles plain ASCII, '$', UCNs, and UTF-8 continuation
/// characters, then forms a raw_identifier token and (outside raw mode)
/// hands it off to the preprocessor for lookup and macro expansion.
bool Lexer::LexIdentifierContinue(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched an identifier start.
  while (true) {
    unsigned char C = *CurPtr;
    // Fast path.
    if (isAsciiIdentifierContinue(C)) {
      ++CurPtr;
      continue;
    }

    unsigned Size;
    // Slow path: handle trigraph, unicode codepoints, UCNs.
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!LangOpts.DollarIdents)
        break;
      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      continue;
    if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      continue;
    // Neither an expected Unicode codepoint nor a UCN.
    break;
  }

  const char *IdStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
  Result.setRawIdentifierData(IdStart);

  // If we are in raw mode, return this identifier raw.  There is no need to
  // look up identifier information or attempt to macro expand it.
  if (LexingRawMode)
    return true;

  // Fill in Result.IdentifierInfo and update the token kind,
  // looking up the identifier in the identifier table.
  IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
  // Note that we have to call PP->LookUpIdentifierInfo() even for code
  // completion, it writes IdentifierInfo into Result, and callers rely on it.

  // If the completion point is at the end of an identifier, we want to treat
  // the identifier as incomplete even if it resolves to a macro or a keyword.
  // This allows e.g. 'class^' to complete to 'classifier'.
  if (isCodeCompletionPoint(CurPtr)) {
    // Return the code-completion token.
    Result.setKind(tok::code_completion);
    // Skip the code-completion char and all immediate identifier characters.
    // This ensures we get consistent behavior when completing at any point in
    // an identifier (i.e. at the start, in the middle, at the end). Note that
    // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code
    // simpler.
    assert(*CurPtr == 0 && "Completion character must be 0");
    ++CurPtr;
    // Note that code completion token is not added as a separate character
    // when the completion point is at the end of the buffer. Therefore, we need
    // to check if the buffer has ended.
    if (CurPtr < BufferEnd) {
      while (isAsciiIdentifierContinue(*CurPtr))
        ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // Finally, now that we know we have an identifier, pass this off to the
  // preprocessor, which may macro expand it or something.
  if (II->isHandleIdentifierCase())
    return PP->HandleIdentifier(Result);

  return true;
}
1843 
1844 /// isHexaLiteral - Return true if Start points to a hex constant.
1845 /// in microsoft mode (where this is supposed to be several different tokens).
1846 bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
1847   unsigned Size;
1848   char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
1849   if (C1 != '0')
1850     return false;
1851   char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
1852   return (C2 == 'x' || C2 == 'X');
1853 }
1854 
/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant. From[-1] is the first character lexed.  Return the end of the
/// constant.  Recurses to extend the token across exponent signs, digit
/// separators, and ud-suffix characters that fall outside the basic
/// pp-number body.
bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  // Consume the pp-number body (digits, identifier characters, '.').
  while (isPreprocessingNumberBody(C)) {
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
    // If we are in Microsoft mode, don't continue if the constant is hex.
    // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
    if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a hex FP constant, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
    // Outside C99 and C++17, we accept hexadecimal floating point numbers as a
    // not-quite-conforming extension. Only do so if this looks like it's
    // actually meant to be a hexfloat, and not if it has a ud-suffix.
    bool IsHexFloat = true;
    if (!LangOpts.C99) {
      if (!isHexaLiteral(BufferPtr, LangOpts))
        IsHexFloat = false;
      else if (!LangOpts.CPlusPlus17 &&
               std::find(BufferPtr, CurPtr, '_') != CurPtr)
        IsHexFloat = false;
    }
    if (IsHexFloat)
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a digit separator, continue.
  if (C == '\'' && (LangOpts.CPlusPlus14 || LangOpts.C2x)) {
    unsigned NextSize;
    char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, LangOpts);
    // Only treat the quote as a separator when another identifier-continue
    // character follows; otherwise it starts a character literal.
    if (isAsciiIdentifierContinue(Next)) {
      if (!isLexingRawMode())
        Diag(CurPtr, LangOpts.CPlusPlus
                         ? diag::warn_cxx11_compat_digit_separator
                         : diag::warn_c2x_compat_digit_separator);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      CurPtr = ConsumeChar(CurPtr, NextSize, Result);
      return LexNumericConstant(Result, CurPtr);
    }
  }

  // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
  if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
    return LexNumericConstant(Result, CurPtr);
  if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
    return LexNumericConstant(Result, CurPtr);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
  return true;
}
1920 
/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
/// in C++11, or warn on a ud-suffix in C++98.  \p CurPtr points just past the
/// closing quote of the literal; returns the pointer past the consumed suffix
/// (or CurPtr unchanged if there is no suffix to consume).
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
                               bool IsStringLiteral) {
  assert(LangOpts.CPlusPlus);

  // Maximally munch an identifier.
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  bool Consumed = false;

  // A suffix must begin with an identifier-start character; UCNs and UTF-8
  // codepoints are consumed immediately (and noted via Consumed).
  if (!isAsciiIdentifierStart(C)) {
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      Consumed = true;
    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      Consumed = true;
    else
      return CurPtr;
  }

  // Before C++11 there are no ud-suffixes; just warn that the code will
  // change meaning and leave the suffix unconsumed.
  if (!LangOpts.CPlusPlus11) {
    if (!isLexingRawMode())
      Diag(CurPtr,
           C == '_' ? diag::warn_cxx11_compat_user_defined_literal
                    : diag::warn_cxx11_compat_reserved_user_defined_literal)
        << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
    return CurPtr;
  }

  // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
  // that does not start with an underscore is ill-formed. As a conforming
  // extension, we treat all such suffixes as if they had whitespace before
  // them. We assume a suffix beginning with a UCN or UTF-8 character is more
  // likely to be a ud-suffix than a macro, however, and accept that.
  if (!Consumed) {
    bool IsUDSuffix = false;
    if (C == '_')
      IsUDSuffix = true;
    else if (IsStringLiteral && LangOpts.CPlusPlus14) {
      // In C++1y, we need to look ahead a few characters to see if this is a
      // valid suffix for a string literal or a numeric literal (this could be
      // the 'operator""if' defining a numeric literal operator).
      const unsigned MaxStandardSuffixLength = 3;
      char Buffer[MaxStandardSuffixLength] = { C };
      // NOTE: this inner 'Consumed' (a byte count) intentionally shadows the
      // outer bool 'Consumed' for the duration of the lookahead.
      unsigned Consumed = Size;
      unsigned Chars = 1;
      while (true) {
        unsigned NextSize;
        char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize, LangOpts);
        if (!isAsciiIdentifierContinue(Next)) {
          // End of suffix. Check whether this is on the allowed list.
          const StringRef CompleteSuffix(Buffer, Chars);
          IsUDSuffix =
              StringLiteralParser::isValidUDSuffix(LangOpts, CompleteSuffix);
          break;
        }

        if (Chars == MaxStandardSuffixLength)
          // Too long: can't be a standard suffix.
          break;

        Buffer[Chars++] = Next;
        Consumed += NextSize;
      }
    }

    if (!IsUDSuffix) {
      // Reserved suffix (no leading underscore, not a standard suffix):
      // diagnose and treat it as if whitespace preceded it.
      if (!isLexingRawMode())
        Diag(CurPtr, LangOpts.MSVCCompat
                         ? diag::ext_ms_reserved_user_defined_literal
                         : diag::ext_reserved_user_defined_literal)
            << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    CurPtr = ConsumeChar(CurPtr, Size, Result);
  }

  // Consume the rest of the suffix as identifier-continue characters.
  Result.setFlag(Token::HasUDSuffix);
  while (true) {
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
    } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
    } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
    } else
      break;
  }

  return CurPtr;
}
2012 
/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L" or u8" or u" or U".  Forms a token of the given \p Kind, or
/// tok::unknown on an unterminated literal.
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
                             tok::TokenKind Kind) {
  const char *AfterQuote = CurPtr;
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;

  // Warn that u8/u/U string literals are incompatible with C++98 / C99.
  if (!isLexingRawMode() &&
      (Kind == tok::utf8_string_literal ||
       Kind == tok::utf16_string_literal ||
       Kind == tok::utf32_string_literal))
    Diag(BufferPtr, LangOpts.CPlusPlus ? diag::warn_cxx98_compat_unicode_literal
                                       : diag::warn_c99_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.  Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated string: diagnose and recover with a tok::unknown that
      // stops just before the newline/EOF.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      // A NUL inside the buffer may be the code-completion point.
      if (isCodeCompletionPoint(CurPtr-1)) {
        if (ParsingFilename)
          codeCompleteIncludedFile(AfterQuote, CurPtr - 1, /*IsAngled=*/false);
        else
          PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        cutOffLexing();
        return true;
      }

      // Otherwise remember it so we can warn after the literal is complete.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // In C++, lex the optional ud-suffix (diagnosed appropriately pre-C++11
  // inside LexUDSuffix).
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
2073 
/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
/// having lexed R", LR", u8R", uR", or UR".  Forms a token of the given
/// \p Kind, or tok::unknown on a malformed delimiter or unterminated literal.
bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
                                tok::TokenKind Kind) {
  // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
  //  Between the initial and final double quote characters of the raw string,
  //  any transformations performed in phases 1 and 2 (trigraphs,
  //  universal-character-names, and line splicing) are reverted.

  if (!isLexingRawMode())
    Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);

  // Scan the d-char-sequence; the standard caps it at 16 characters.
  unsigned PrefixLen = 0;

  while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
    ++PrefixLen;

  // If the last character was not a '(', then we didn't lex a valid delimiter.
  if (CurPtr[PrefixLen] != '(') {
    if (!isLexingRawMode()) {
      const char *PrefixEnd = &CurPtr[PrefixLen];
      if (PrefixLen == 16) {
        Diag(PrefixEnd, diag::err_raw_delim_too_long);
      } else {
        Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
          << StringRef(PrefixEnd, 1);
      }
    }

    // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
    // it's possible the '"' was intended to be part of the raw string, but
    // there's not much we can do about that.
    while (true) {
      char C = *CurPtr++;

      if (C == '"')
        break;
      if (C == 0 && CurPtr-1 == BufferEnd) {
        // Hit end of buffer without finding a quote; don't walk past it.
        --CurPtr;
        break;
      }
    }

    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  // Save prefix and move CurPtr past it
  const char *Prefix = CurPtr;
  CurPtr += PrefixLen + 1; // skip over prefix and '('

  // Scan for the terminating )delimiter" sequence.
  while (true) {
    char C = *CurPtr++;

    if (C == ')') {
      // Check for prefix match and closing quote.
      if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
        CurPtr += PrefixLen + 1; // skip over prefix and '"'
        break;
      }
    } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_raw_string)
          << StringRef(Prefix, PrefixLen);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
2153 
/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
/// Forms a tok::header_name, or falls back to tok::less if unterminated.
bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;
  const char *AfterLessPos = CurPtr;
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.  Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (isVerticalWhitespace(C) ||               // Newline.
        (C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file.
      // If the filename is unterminated, then it must just be a lone <
      // character.  Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return true;
    }

    if (C == 0) {
      // A NUL may be the code-completion point inside the filename.
      if (isCodeCompletionPoint(CurPtr - 1)) {
        codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, /*IsAngled=*/true);
        cutOffLexing();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        return true;
      }
      // Otherwise remember it so we can warn once the name is complete.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::header_name);
  Result.setLiteralData(TokStart);
  return true;
}
2197 
/// Trigger code completion of an #include filename.  \p PathStart points at
/// the first character after the opening quote/angle bracket and
/// \p CompletionPoint at the completion marker within the partial path.
void Lexer::codeCompleteIncludedFile(const char *PathStart,
                                     const char *CompletionPoint,
                                     bool IsAngled) {
  // Completion only applies to the filename, after the last slash.
  StringRef PartialPath(PathStart, CompletionPoint - PathStart);
  // MSVC compatibility also treats '\' as a path separator.
  llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/";
  auto Slash = PartialPath.find_last_of(SlashChars);
  StringRef Dir =
      (Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
  const char *StartOfFilename =
      (Slash == StringRef::npos) ? PathStart : PathStart + Slash + 1;
  // Code completion filter range is the filename only, up to completion point.
  PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
      StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
  // We should replace the characters up to the closing quote or closest slash,
  // if any.
  while (CompletionPoint < BufferEnd) {
    char Next = *(CompletionPoint + 1);
    if (Next == 0 || Next == '\r' || Next == '\n')
      break;
    ++CompletionPoint;
    if (Next == (IsAngled ? '>' : '"'))
      break;
    if (llvm::is_contained(SlashChars, Next))
      break;
  }

  // Report the token range being completed so clients can replace it.
  PP->setCodeCompletionTokenRange(
      FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
      FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
  PP->CodeCompleteIncludedFile(Dir, IsAngled);
}
2230 
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u8' or u' or U'.  Forms a token of the given
/// \p Kind, or tok::unknown for empty or unterminated constants.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
                            tok::TokenKind Kind) {
  // Does this character contain the \0 character?
  const char *NulCharacter = nullptr;

  // Warn about character-literal kinds newer than the active standard.
  if (!isLexingRawMode()) {
    if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
      Diag(BufferPtr, LangOpts.CPlusPlus
                          ? diag::warn_cxx98_compat_unicode_literal
                          : diag::warn_c99_compat_unicode_literal);
    else if (Kind == tok::utf8_char_constant)
      Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
  }

  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    // '' is not a valid character constant; recover with tok::unknown.
    if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
      Diag(BufferPtr, diag::ext_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  while (C != '\'') {
    // Skip escaped characters.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated constant: diagnose and recover before the newline/EOF.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      // A NUL inside the buffer may be the code-completion point.
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      // Otherwise remember it so we can warn after the constant is complete.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // In C++, lex the optional ud-suffix (diagnosed appropriately pre-C++11
  // inside LexUDSuffix).
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, false);

  // If a nul character existed in the character, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 0;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
2295 
2296 /// SkipWhitespace - Efficiently skip over a series of whitespace characters.
2297 /// Update BufferPtr to point to the next non-whitespace character and return.
2298 ///
2299 /// This method forms a token and returns true if KeepWhitespaceMode is enabled.
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
                           bool &TokAtPhysicalStartOfLine) {
  // Whitespace - Skip it, then return the token after the whitespace.
  // The caller guarantees CurPtr[-1] was whitespace; note whether it was a
  // newline so we can set StartOfLine flags later.
  bool SawNewline = isVerticalWhitespace(CurPtr[-1]);

  unsigned char Char = *CurPtr;

  // Track the last newline of this whitespace run locally, and record the
  // first newline in the member NewLinePtr; both feed the empty-line
  // notification below.
  const char *lastNewLine = nullptr;
  auto setLastNewLine = [&](const char *Ptr) {
    lastNewLine = Ptr;
    if (!NewLinePtr)
      NewLinePtr = Ptr;
  };
  if (SawNewline)
    setLastNewLine(CurPtr - 1);

  // Skip consecutive spaces efficiently.
  while (true) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we have something other than whitespace, we're done.
    if (!isVerticalWhitespace(Char))
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // OK, but handle newline.
    if (*CurPtr == '\n')
      setLastNewLine(CurPtr);
    SawNewline = true;
    Char = *++CurPtr;
  }

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    if (SawNewline) {
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
    }
    // FIXME: The next token will not have LeadingSpace set.
    return true;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);

  Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
  if (SawNewline) {
    Result.setFlag(Token::StartOfLine);
    TokAtPhysicalStartOfLine = true;

    // If the run spanned more than one newline, everything between the first
    // and last newline was blank; tell the EmptylineHandler, if any.
    if (NewLinePtr && lastNewLine && NewLinePtr != lastNewLine && PP) {
      if (auto *Handler = PP->getEmptylineHandler())
        Handler->HandleEmptyline(SourceRange(getSourceLocation(NewLinePtr + 1),
                                             getSourceLocation(lastNewLine)));
    }
  }

  BufferPtr = CurPtr;
  return false;
}
2369 
2370 /// We have just read the // characters from input.  Skip until we find the
2371 /// newline character that terminates the comment.  Then update BufferPtr and
2372 /// return.
2373 ///
2374 /// If we're in KeepCommentMode or any CommentHandler has inserted
2375 /// some tokens, this will store the first token and return true.
bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
                            bool &TokAtPhysicalStartOfLine) {
  // If Line comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!LineComment) {
    if (!isLexingRawMode()) // There's no PP in raw mode, so can't emit diags.
      Diag(BufferPtr, diag::ext_line_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    LineComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  //
  // This loop terminates with CurPtr pointing at the newline (or end of buffer)
  // character that ends the line comment.
  char C;
  while (true) {
    C = *CurPtr;
    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    const char *NextLine = CurPtr;
    if (C != 0) {
      // We found a newline, see if it's escaped.
      const char *EscapePtr = CurPtr-1;
      bool HasSpace = false;
      while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
        --EscapePtr;
        HasSpace = true;
      }

      if (*EscapePtr == '\\')
        // Escaped newline.
        CurPtr = EscapePtr;
      else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
               EscapePtr[-2] == '?' && LangOpts.Trigraphs)
        // Trigraph-escaped newline ("??/" is the trigraph for '\').
        CurPtr = EscapePtr-2;
      else
        break; // This is a newline, we're done.

      // If there was space between the backslash and newline, warn about it.
      if (HasSpace && !isLexingRawMode())
        Diag(EscapePtr, diag::backslash_newline_space);
    }

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.  Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs.  If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If we only read one character, then no special handling is needed.
    // We're done and can skip forward to the newline.
    if (C != 0 && CurPtr == OldPtr+1) {
      CurPtr = NextLine;
      break;
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr + 1 && C != '/' &&
        (CurPtr == BufferEnd + 1 || CurPtr[0] != '/')) {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isWhitespace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isWhitespace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_line_comment);
          break;
        }
    }

    // A newline (or end of buffer): back onto the terminator and stop.
    if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
      --CurPtr;
      break;
    }

    // A NUL may mark a code-completion point; hand control to the completer.
    if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }
  }

  // Found but did not consume the newline.  Notify comment handlers about the
  // comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveLineComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOD token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  NewLinePtr = CurPtr++;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  TokAtPhysicalStartOfLine = true;
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}
2515 
2516 /// If in save-comment mode, package up this Line comment in an appropriate
2517 /// way and return it.
2518 bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
2519   // If we're not in a preprocessor directive, just return the // comment
2520   // directly.
2521   FormTokenWithChars(Result, CurPtr, tok::comment);
2522 
2523   if (!ParsingPreprocessorDirective || LexingRawMode)
2524     return true;
2525 
2526   // If this Line-style comment is in a macro definition, transmogrify it into
2527   // a C-style block comment.
2528   bool Invalid = false;
2529   std::string Spelling = PP->getSpelling(Result, &Invalid);
2530   if (Invalid)
2531     return true;
2532 
2533   assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
2534   Spelling[1] = '*';   // Change prefix to "/*".
2535   Spelling += "*/";    // add suffix.
2536 
2537   Result.setKind(tok::comment);
2538   PP->CreateString(Spelling, Result,
2539                    Result.getLocation(), Result.getLocation());
2540   return true;
2541 }
2542 
2543 /// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
2544 /// character (either \\n or \\r) is part of an escaped newline sequence.  Issue
2545 /// a diagnostic if so.  We know that the newline is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L,
                                                  bool Trigraphs) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Position of the first trigraph in the ending sequence.
  const char *TrigraphPos = nullptr;
  // Position of the first whitespace after a '\' in the ending sequence.
  const char *SpacePos = nullptr;

  // Walk backwards from the newline, unwinding any number of escaped-newline
  // sequences, until we either reach a '*' (so splicing yields "*/") or can
  // prove this is not an escaped comment terminator.
  while (true) {
    // Back up off the newline.
    --CurPtr;

    // If this is a two-character newline sequence, skip the other character.
    if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
      // \n\n or \r\r -> not escaped newline.
      if (CurPtr[0] == CurPtr[1])
        return false;
      // \n\r or \r\n -> skip the newline.
      --CurPtr;
    }

    // If we have horizontal whitespace, skip over it.  We allow whitespace
    // between the slash and newline.  NUL bytes are treated like whitespace
    // here as well.
    while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
      SpacePos = CurPtr;
      --CurPtr;
    }

    // If we have a slash, this is an escaped newline.
    if (*CurPtr == '\\') {
      --CurPtr;
    } else if (CurPtr[0] == '/' && CurPtr[-1] == '?' && CurPtr[-2] == '?') {
      // This is a trigraph encoding of a slash ("??/").
      TrigraphPos = CurPtr - 2;
      CurPtr -= 3;
    } else {
      return false;
    }

    // If the character preceding the escaped newline is a '*', then after line
    // splicing we have a '*/' ending the comment.
    if (*CurPtr == '*')
      break;

    // Another newline means another escaped newline to unwind; anything else
    // means this was not an escaped "*/".
    if (*CurPtr != '\n' && *CurPtr != '\r')
      return false;
  }

  if (TrigraphPos) {
    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(TrigraphPos, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr + 1, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (SpacePos && !L->isLexingRawMode())
    L->Diag(SpacePos, diag::backslash_newline_space);

  return true;
}
2617 
2618 #ifdef __SSE2__
2619 #include <emmintrin.h>
2620 #elif __ALTIVEC__
2621 #include <altivec.h>
2622 #undef bool
2623 #endif
2624 
2625 /// We have just read from input the / and * characters that started a comment.
2626 /// Read until we find the * and / characters that terminate the comment.
2627 /// Note that we don't bother decoding trigraphs or escaped newlines in block
2628 /// comments, because they cannot cause the comment to end.  The only thing
2629 /// that can happen is the comment could end with an escaped newline between
2630 /// the terminating * and /.
2631 ///
2632 /// If we're in KeepCommentMode or any CommentHandler has inserted
2633 /// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
                             bool &TokAtPhysicalStartOfLine) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  // Hitting the end of the buffer right after "/*" is an unterminated comment.
  if (C == 0 && CurPtr == BufferEnd+1) {
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token.  Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (true) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd &&
        // If there is a code-completion point avoid the fast scan because it
        // doesn't check for '\0'.
        !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Compare 16 bytes at a time against '/'; the movemask gives a bit per
      // byte that matched.
      __m128i Slashes = _mm_set1_epi8('/');
      while (CurPtr+16 <= BufferEnd) {
        int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
                                    Slashes));
        if (cmp != 0) {
          // Adjust the pointer to point directly after the first slash. It's
          // not necessary to set C here, it will be overwritten at the end of
          // the outer loop.
          CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
          goto FoundSlash;
        }
        CurPtr += 16;
      }
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr + 16 <= BufferEnd &&
             !vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

    if (C == '/') {
  FoundSlash:
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      // A newline right before the '/' may hide an escaped "*/" terminator.
      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr - 2, this,
                                                  LangOpts.Trigraphs)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token.  Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      // NUL marking a code-completion point: hand off to the completer.
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }

    C = *CurPtr++;
  }

  // Notify comment handlers about the comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.  This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}
2796 
2797 //===----------------------------------------------------------------------===//
2798 // Primary Lexing Entry Points
2799 //===----------------------------------------------------------------------===//
2800 
2801 /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
2802 /// uninterpreted string.  This switches the lexer out of directive mode.
void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  Token Tmp;
  Tmp.startToken();

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (true) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      // Ordinary character: append it to the output, if requested.
      if (Result)
        Result->push_back(Char);
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        // An embedded NUL may be a code-completion point.
        if (isCodeCompletionPoint(CurPtr-1)) {
          PP->CodeCompleteNaturalLanguage();
          cutOffLexing();
          return;
        }

        // Nope, normal character, continue.
        if (Result)
          Result->push_back(Char);
        break;
      }
      // FALL THROUGH.
      LLVM_FALLTHROUGH;
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOD transition.
      Lex(Tmp);
      if (Tmp.is(tok::code_completion)) {
        if (PP)
          PP->CodeCompleteNaturalLanguage();
        Lex(Tmp);
      }
      assert(Tmp.is(tok::eod) && "Unexpected token!");

      // Finally, we're done.
      return;
    }
  }
}
2854 
2855 /// LexEndOfFile - CurPtr points to the end of this file.  Handle this
2856 /// condition, reporting diagnostics and handling other edge cases as required.
2857 /// This returns true if Result contains a token, false if PP.Lex should be
2858 /// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eod);

    // Restore comment saving mode, in case it was disabled for directive.
    if (PP)
      resetExtendedTokenMode();
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) {
    PP->setRecordedPreambleConditionalStack(ConditionalStack);
    // If the preamble cuts off the end of a header guard, consider it guarded.
    // The guard is valid for the preamble content itself, and for tools the
    // most useful answer is "yes, this file has a header guard".
    if (!ConditionalStack.empty())
      MIOpt.ExitTopLevelConditional();
    ConditionalStack.clear();
  }

  // Issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.  The diagnostic is suppressed
  // when lexing the code-completion file, where truncation is expected.
  while (!ConditionalStack.empty()) {
    if (PP->getCodeCompletionFileLoc() != FileLoc)
      PP->Diag(ConditionalStack.back().IfLoc,
               diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
    DiagnosticsEngine &Diags = PP->getDiagnostics();
    SourceLocation EndLoc = getSourceLocation(BufferEnd);
    unsigned DiagID;

    if (LangOpts.CPlusPlus11) {
      // C++11 [lex.phases] 2.2 p2
      // Prefer the C++98 pedantic compatibility warning over the generic,
      // non-extension, user-requested "missing newline at EOF" warning.
      if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) {
        DiagID = diag::warn_cxx98_compat_no_newline_eof;
      } else {
        DiagID = diag::warn_no_newline_eof;
      }
    } else {
      DiagID = diag::ext_no_newline_eof;
    }

    // Offer a fix-it that appends the missing newline.
    Diag(BufferEnd, DiagID)
      << FixItHint::CreateInsertion(EndLoc, "\n");
  }

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result, isPragmaLexer());
}
2933 
2934 /// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
2935 /// the specified lexer will return a tok::l_paren token, 0 if it is something
2936 /// else and 2 if there are no more tokens in the buffer controlled by the
2937 /// lexer.
2938 unsigned Lexer::isNextPPTokenLParen() {
2939   assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
2940 
2941   if (isDependencyDirectivesLexer()) {
2942     if (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size())
2943       return 2;
2944     return DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
2945         tok::l_paren);
2946   }
2947 
2948   // Switch to 'skipping' mode.  This will ensure that we can lex a token
2949   // without emitting diagnostics, disables macro expansion, and will cause EOF
2950   // to return an EOF token instead of popping the include stack.
2951   LexingRawMode = true;
2952 
2953   // Save state that can be changed while lexing so that we can restore it.
2954   const char *TmpBufferPtr = BufferPtr;
2955   bool inPPDirectiveMode = ParsingPreprocessorDirective;
2956   bool atStartOfLine = IsAtStartOfLine;
2957   bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
2958   bool leadingSpace = HasLeadingSpace;
2959 
2960   Token Tok;
2961   Lex(Tok);
2962 
2963   // Restore state that may have changed.
2964   BufferPtr = TmpBufferPtr;
2965   ParsingPreprocessorDirective = inPPDirectiveMode;
2966   HasLeadingSpace = leadingSpace;
2967   IsAtStartOfLine = atStartOfLine;
2968   IsAtPhysicalStartOfLine = atPhysicalStartOfLine;
2969 
2970   // Restore the lexer back to non-skipping mode.
2971   LexingRawMode = false;
2972 
2973   if (Tok.is(tok::eof))
2974     return 2;
2975   return Tok.is(tok::l_paren);
2976 }
2977 
2978 /// Find the end of a version control conflict marker.
2979 static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
2980                                    ConflictMarkerKind CMK) {
2981   const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
2982   size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
2983   auto RestOfBuffer = StringRef(CurPtr, BufferEnd - CurPtr).substr(TermLen);
2984   size_t Pos = RestOfBuffer.find(Terminator);
2985   while (Pos != StringRef::npos) {
2986     // Must occur at start of line.
2987     if (Pos == 0 ||
2988         (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
2989       RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
2990       Pos = RestOfBuffer.find(Terminator);
2991       continue;
2992     }
2993     return RestOfBuffer.data()+Pos;
2994   }
2995   return nullptr;
2996 }
2997 
2998 /// IsStartOfConflictMarker - If the specified pointer is the start of a version
2999 /// control conflict marker like '<<<<<<<', recognize it as such, emit an error
3000 /// and recover nicely.  This returns true if it is a conflict marker and false
3001 /// if not.
3002 bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
3003   // Only a conflict marker if it starts at the beginning of a line.
3004   if (CurPtr != BufferStart &&
3005       CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
3006     return false;
3007 
3008   // Check to see if we have <<<<<<< or >>>>.
3009   if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") &&
3010       !StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> "))
3011     return false;
3012 
3013   // If we have a situation where we don't care about conflict markers, ignore
3014   // it.
3015   if (CurrentConflictMarkerState || isLexingRawMode())
3016     return false;
3017 
3018   ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;
3019 
3020   // Check to see if there is an ending marker somewhere in the buffer at the
3021   // start of a line to terminate this conflict marker.
3022   if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
3023     // We found a match.  We are really in a conflict marker.
3024     // Diagnose this, and ignore to the end of line.
3025     Diag(CurPtr, diag::err_conflict_marker);
3026     CurrentConflictMarkerState = Kind;
3027 
3028     // Skip ahead to the end of line.  We know this exists because the
3029     // end-of-conflict marker starts with \r or \n.
3030     while (*CurPtr != '\r' && *CurPtr != '\n') {
3031       assert(CurPtr != BufferEnd && "Didn't find end of line");
3032       ++CurPtr;
3033     }
3034     BufferPtr = CurPtr;
3035     return true;
3036   }
3037 
3038   // No end of conflict marker found.
3039   return false;
3040 }
3041 
/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
/// is the end of a conflict marker.  Handle it by ignoring up until the end of
/// the line.  This returns true if it is a conflict marker and false if not.
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.  CurrentConflictMarkerState is only non-zero once a start-of-conflict
  // marker has already been seen.
  if (!CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // Check to see if we have the marker (4 characters in a row).  All four
  // characters must be identical to the first one.
  for (unsigned i = 1; i != 4; ++i)
    if (CurPtr[i] != CurPtr[0])
      return false;

  // If we do have it, search for the end of the conflict marker.  This could
  // fail if it got skipped with a '#if 0' or something.  Note that CurPtr might
  // be the end of conflict marker.
  if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
                                        CurrentConflictMarkerState)) {
    CurPtr = End;

    // Skip ahead to the end of line.
    while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
      ++CurPtr;

    // Resume lexing after the marker's line.
    BufferPtr = CurPtr;

    // No longer in the conflict marker.
    CurrentConflictMarkerState = CMK_None;
    return true;
  }

  return false;
}
3082 
/// Scan forward from \p CurPtr for the editor placeholder terminator "#>".
/// Returns a pointer just past the terminator, or null if the buffer ends
/// before a terminator is found.
static const char *findPlaceholderEnd(const char *CurPtr,
                                      const char *BufferEnd) {
  if (CurPtr == BufferEnd)
    return nullptr;
  // The terminator is two characters wide, so the scan can stop one
  // character before the end of the buffer.
  for (const char *Ptr = CurPtr, *Last = BufferEnd - 1; Ptr != Last; ++Ptr)
    if (Ptr[0] == '#' && Ptr[1] == '>')
      return Ptr + 2;
  return nullptr;
}
3094 
3095 bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
3096   assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
3097   if (!PP || !PP->getPreprocessorOpts().LexEditorPlaceholders || LexingRawMode)
3098     return false;
3099   const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
3100   if (!End)
3101     return false;
3102   const char *Start = CurPtr - 1;
3103   if (!LangOpts.AllowEditorPlaceholders)
3104     Diag(Start, diag::err_placeholder_in_source);
3105   Result.startToken();
3106   FormTokenWithChars(Result, End, tok::raw_identifier);
3107   Result.setRawIdentifierData(Start);
3108   PP->LookUpIdentifierInfo(Result);
3109   Result.setFlag(Token::IsEditorPlaceholder);
3110   BufferPtr = End;
3111   return true;
3112 }
3113 
3114 bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
3115   if (PP && PP->isCodeCompletionEnabled()) {
3116     SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
3117     return Loc == PP->getCodeCompletionLoc();
3118   }
3119 
3120   return false;
3121 }
3122 
3123 llvm::Optional<uint32_t> Lexer::tryReadNumericUCN(const char *&StartPtr,
3124                                                   const char *SlashLoc,
3125                                                   Token *Result) {
3126   unsigned CharSize;
3127   char Kind = getCharAndSize(StartPtr, CharSize);
3128   assert((Kind == 'u' || Kind == 'U') && "expected a UCN");
3129 
3130   unsigned NumHexDigits;
3131   if (Kind == 'u')
3132     NumHexDigits = 4;
3133   else if (Kind == 'U')
3134     NumHexDigits = 8;
3135 
3136   bool Delimited = false;
3137   bool FoundEndDelimiter = false;
3138   unsigned Count = 0;
3139   bool Diagnose = Result && !isLexingRawMode();
3140 
3141   if (!LangOpts.CPlusPlus && !LangOpts.C99) {
3142     if (Diagnose)
3143       Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
3144     return llvm::None;
3145   }
3146 
3147   const char *CurPtr = StartPtr + CharSize;
3148   const char *KindLoc = &CurPtr[-1];
3149 
3150   uint32_t CodePoint = 0;
3151   while (Count != NumHexDigits || Delimited) {
3152     char C = getCharAndSize(CurPtr, CharSize);
3153     if (!Delimited && C == '{') {
3154       Delimited = true;
3155       CurPtr += CharSize;
3156       continue;
3157     }
3158 
3159     if (Delimited && C == '}') {
3160       CurPtr += CharSize;
3161       FoundEndDelimiter = true;
3162       break;
3163     }
3164 
3165     unsigned Value = llvm::hexDigitValue(C);
3166     if (Value == -1U) {
3167       if (!Delimited)
3168         break;
3169       if (Diagnose)
3170         Diag(BufferPtr, diag::warn_delimited_ucn_incomplete)
3171             << StringRef(KindLoc, 1);
3172       return llvm::None;
3173     }
3174 
3175     if (CodePoint & 0xF000'0000) {
3176       if (Diagnose)
3177         Diag(KindLoc, diag::err_escape_too_large) << 0;
3178       return llvm::None;
3179     }
3180 
3181     CodePoint <<= 4;
3182     CodePoint |= Value;
3183     CurPtr += CharSize;
3184     Count++;
3185   }
3186 
3187   if (Count == 0) {
3188     if (Diagnose)
3189       Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
3190                                        : diag::warn_ucn_escape_no_digits)
3191           << StringRef(KindLoc, 1);
3192     return llvm::None;
3193   }
3194 
3195   if (Delimited && Kind == 'U') {
3196     if (Diagnose)
3197       Diag(StartPtr, diag::err_hex_escape_no_digits) << StringRef(KindLoc, 1);
3198     return llvm::None;
3199   }
3200 
3201   if (!Delimited && Count != NumHexDigits) {
3202     if (Diagnose) {
3203       Diag(BufferPtr, diag::warn_ucn_escape_incomplete);
3204       // If the user wrote \U1234, suggest a fixit to \u.
3205       if (Count == 4 && NumHexDigits == 8) {
3206         CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
3207         Diag(KindLoc, diag::note_ucn_four_not_eight)
3208             << FixItHint::CreateReplacement(URange, "u");
3209       }
3210     }
3211     return llvm::None;
3212   }
3213 
3214   if (Delimited && PP) {
3215     Diag(BufferPtr, diag::ext_delimited_escape_sequence) << /*delimited*/ 0;
3216   }
3217 
3218   if (Result) {
3219     Result->setFlag(Token::HasUCN);
3220     if (CurPtr - StartPtr == (ptrdiff_t)(Count + 2 + (Delimited ? 2 : 0)))
3221       StartPtr = CurPtr;
3222     else
3223       while (StartPtr != CurPtr)
3224         (void)getAndAdvanceChar(StartPtr, *Result);
3225   } else {
3226     StartPtr = CurPtr;
3227   }
3228   return CodePoint;
3229 }
3230 
3231 llvm::Optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr,
3232                                                 Token *Result) {
3233   unsigned CharSize;
3234   bool Diagnose = Result && !isLexingRawMode();
3235 
3236   char C = getCharAndSize(StartPtr, CharSize);
3237   assert(C == 'N' && "expected \\N{...}");
3238 
3239   const char *CurPtr = StartPtr + CharSize;
3240   const char *KindLoc = &CurPtr[-1];
3241 
3242   C = getCharAndSize(CurPtr, CharSize);
3243   if (C != '{') {
3244     if (Diagnose)
3245       Diag(StartPtr, diag::warn_ucn_escape_incomplete);
3246     return llvm::None;
3247   }
3248   CurPtr += CharSize;
3249   const char *StartName = CurPtr;
3250   bool FoundEndDelimiter = false;
3251   llvm::SmallVector<char, 30> Buffer;
3252   while (C) {
3253     C = getCharAndSize(CurPtr, CharSize);
3254     CurPtr += CharSize;
3255     if (C == '}') {
3256       FoundEndDelimiter = true;
3257       break;
3258     }
3259 
3260     if (!isAlphanumeric(C) && C != '_' && C != '-' && C != ' ')
3261       break;
3262     Buffer.push_back(C);
3263   }
3264 
3265   if (!FoundEndDelimiter || Buffer.empty()) {
3266     if (Diagnose)
3267       Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
3268                                        : diag::warn_delimited_ucn_incomplete)
3269           << StringRef(KindLoc, 1);
3270     return llvm::None;
3271   }
3272 
3273   StringRef Name(Buffer.data(), Buffer.size());
3274   llvm::Optional<char32_t> Res =
3275       llvm::sys::unicode::nameToCodepointStrict(Name);
3276   llvm::Optional<llvm::sys::unicode::LooseMatchingResult> LooseMatch;
3277   if (!Res) {
3278     if (!isLexingRawMode()) {
3279       Diag(StartPtr, diag::err_invalid_ucn_name)
3280           << StringRef(Buffer.data(), Buffer.size());
3281       LooseMatch = llvm::sys::unicode::nameToCodepointLooseMatching(Name);
3282       if (LooseMatch) {
3283         Diag(StartName, diag::note_invalid_ucn_name_loose_matching)
3284             << FixItHint::CreateReplacement(
3285                    makeCharRange(*this, StartName, CurPtr - CharSize),
3286                    LooseMatch->Name);
3287       }
3288     }
3289     // When finding a match using Unicode loose matching rules
3290     // recover after having emitted a diagnostic.
3291     if (!LooseMatch)
3292       return llvm::None;
3293     // We do not offer missspelled character names suggestions here
3294     // as the set of what would be a valid suggestion depends on context,
3295     // and we should not make invalid suggestions.
3296   }
3297 
3298   if (Diagnose && PP && !LooseMatch)
3299     Diag(BufferPtr, diag::ext_delimited_escape_sequence) << /*named*/ 1;
3300 
3301   if (LooseMatch)
3302     Res = LooseMatch->CodePoint;
3303 
3304   if (Result) {
3305     Result->setFlag(Token::HasUCN);
3306     if (CurPtr - StartPtr == (ptrdiff_t)(Buffer.size() + 4))
3307       StartPtr = CurPtr;
3308     else
3309       while (StartPtr != CurPtr)
3310         (void)getAndAdvanceChar(StartPtr, *Result);
3311   } else {
3312     StartPtr = CurPtr;
3313   }
3314   return *Res;
3315 }
3316 
/// Lex a universal character name (\uXXXX, \UXXXXXXXX, \u{...}, or \N{...})
/// starting at the kind character that follows the backslash, then apply the
/// C and C++ restrictions on which code points a UCN may designate.
///
/// \param StartPtr  On entry, points at the kind character ('u', 'U' or 'N');
///                  advanced past the escape when a UCN is consumed.
/// \param SlashLoc  Location of the introducing backslash, for diagnostics.
/// \param Result    Token being formed, or null when called speculatively.
/// \returns the code point, or 0 on failure.  (Code point 0 therefore cannot
///          be distinguished from failure by callers.)
uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
                           Token *Result) {

  unsigned CharSize;
  llvm::Optional<uint32_t> CodePointOpt;
  char Kind = getCharAndSize(StartPtr, CharSize);
  // Dispatch on the escape kind; anything else is not a UCN at all.
  if (Kind == 'u' || Kind == 'U')
    CodePointOpt = tryReadNumericUCN(StartPtr, SlashLoc, Result);
  else if (Kind == 'N')
    CodePointOpt = tryReadNamedUCN(StartPtr, Result);

  if (!CodePointOpt)
    return 0;

  uint32_t CodePoint = *CodePointOpt;

  // Don't apply C family restrictions to UCNs in assembly mode
  if (LangOpts.AsmPreprocessor)
    return CodePoint;

  // C99 6.4.3p2: A universal character name shall not specify a character whose
  //   short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
  //   0060 (`), nor one in the range D800 through DFFF inclusive.)
  // C++11 [lex.charset]p2: If the hexadecimal value for a
  //   universal-character-name corresponds to a surrogate code point (in the
  //   range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
  //   if the hexadecimal value for a universal-character-name outside the
  //   c-char-sequence, s-char-sequence, or r-char-sequence of a character or
  //   string literal corresponds to a control character (in either of the
  //   ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
  //   basic source character set, the program is ill-formed.
  if (CodePoint < 0xA0) {
    // '$', '@' and '`' are the only permitted code points below 0xA0.
    if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
      return CodePoint;

    // We don't use isLexingRawMode() here because we need to warn about bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (CodePoint < 0x20 || CodePoint >= 0x7F)
        Diag(BufferPtr, diag::err_ucn_control_character);
      else {
        char C = static_cast<char>(CodePoint);
        Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
      }
    }

    return 0;
  } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
    // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't.
    // We don't use isLexingRawMode() here because we need to diagnose bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
        Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
      else
        Diag(BufferPtr, diag::err_ucn_escape_invalid);
    }
    return 0;
  }

  return CodePoint;
}
3379 
3380 bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
3381                                    const char *CurPtr) {
3382   if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
3383       isUnicodeWhitespace(C)) {
3384     Diag(BufferPtr, diag::ext_unicode_whitespace)
3385       << makeCharRange(*this, BufferPtr, CurPtr);
3386 
3387     Result.setFlag(Token::LeadingSpace);
3388     return true;
3389   }
3390   return false;
3391 }
3392 
3393 void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
3394   IsAtStartOfLine = Result.isAtStartOfLine();
3395   HasLeadingSpace = Result.hasLeadingSpace();
3396   HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
3397   // Note that this doesn't affect IsAtPhysicalStartOfLine.
3398 }
3399 
/// Lex the next token from the buffer into \p Result, transferring the
/// lexer's pending whitespace/line-start state onto the token first.
/// Returns the result of LexTokenInternal: true when \p Result holds a
/// token; in raw mode this is asserted to always be the case.
bool Lexer::Lex(Token &Result) {
  assert(!isDependencyDirectivesLexer());

  // Start a new token.
  Result.startToken();

  // Set up misc whitespace flags for LexTokenInternal.  Each pending flag is
  // consumed here (transferred to the token and cleared on the lexer).
  if (IsAtStartOfLine) {
    Result.setFlag(Token::StartOfLine);
    IsAtStartOfLine = false;
  }

  if (HasLeadingSpace) {
    Result.setFlag(Token::LeadingSpace);
    HasLeadingSpace = false;
  }

  if (HasLeadingEmptyMacro) {
    Result.setFlag(Token::LeadingEmptyMacro);
    HasLeadingEmptyMacro = false;
  }

  // Capture the physical-line-start state before clearing it; it must be
  // passed by value since LexTokenInternal may update the member.
  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  IsAtPhysicalStartOfLine = false;
  // isRawLex is saved up front because it is only used by the assert below,
  // and the lexer may not be safe to query afterwards.
  bool isRawLex = isLexingRawMode();
  (void) isRawLex;
  bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
  // (After the LexTokenInternal call, the lexer might be destroyed.)
  assert((returnedToken || !isRawLex) && "Raw lex must succeed");
  return returnedToken;
}
3431 
3432 /// LexTokenInternal - This implements a simple C family lexer.  It is an
3433 /// extremely performance critical piece of code.  This assumes that the buffer
3434 /// has a null character at the end of the file.  This returns a preprocessing
3435 /// token, not a normal token, as such, it is an internal interface.  It assumes
3436 /// that the Flags of result have been cleared before calling this.
3437 bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
3438 LexNextToken:
3439   // New token, can't need cleaning yet.
3440   Result.clearFlag(Token::NeedsCleaning);
3441   Result.setIdentifierInfo(nullptr);
3442 
3443   // CurPtr - Cache BufferPtr in an automatic variable.
3444   const char *CurPtr = BufferPtr;
3445 
3446   // Small amounts of horizontal whitespace is very common between tokens.
3447   if (isHorizontalWhitespace(*CurPtr)) {
3448     do {
3449       ++CurPtr;
3450     } while (isHorizontalWhitespace(*CurPtr));
3451 
3452     // If we are keeping whitespace and other tokens, just return what we just
3453     // skipped.  The next lexer invocation will return the token after the
3454     // whitespace.
3455     if (isKeepWhitespaceMode()) {
3456       FormTokenWithChars(Result, CurPtr, tok::unknown);
3457       // FIXME: The next token will not have LeadingSpace set.
3458       return true;
3459     }
3460 
3461     BufferPtr = CurPtr;
3462     Result.setFlag(Token::LeadingSpace);
3463   }
3464 
3465   unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.
3466 
3467   // Read a character, advancing over it.
3468   char Char = getAndAdvanceChar(CurPtr, Result);
3469   tok::TokenKind Kind;
3470 
3471   if (!isVerticalWhitespace(Char))
3472     NewLinePtr = nullptr;
3473 
3474   switch (Char) {
3475   case 0:  // Null.
3476     // Found end of file?
3477     if (CurPtr-1 == BufferEnd)
3478       return LexEndOfFile(Result, CurPtr-1);
3479 
3480     // Check if we are performing code completion.
3481     if (isCodeCompletionPoint(CurPtr-1)) {
3482       // Return the code-completion token.
3483       Result.startToken();
3484       FormTokenWithChars(Result, CurPtr, tok::code_completion);
3485       return true;
3486     }
3487 
3488     if (!isLexingRawMode())
3489       Diag(CurPtr-1, diag::null_in_file);
3490     Result.setFlag(Token::LeadingSpace);
3491     if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3492       return true; // KeepWhitespaceMode
3493 
3494     // We know the lexer hasn't changed, so just try again with this lexer.
3495     // (We manually eliminate the tail call to avoid recursion.)
3496     goto LexNextToken;
3497 
3498   case 26:  // DOS & CP/M EOF: "^Z".
3499     // If we're in Microsoft extensions mode, treat this as end of file.
3500     if (LangOpts.MicrosoftExt) {
3501       if (!isLexingRawMode())
3502         Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
3503       return LexEndOfFile(Result, CurPtr-1);
3504     }
3505 
3506     // If Microsoft extensions are disabled, this is just random garbage.
3507     Kind = tok::unknown;
3508     break;
3509 
3510   case '\r':
3511     if (CurPtr[0] == '\n')
3512       (void)getAndAdvanceChar(CurPtr, Result);
3513     LLVM_FALLTHROUGH;
3514   case '\n':
3515     // If we are inside a preprocessor directive and we see the end of line,
3516     // we know we are done with the directive, so return an EOD token.
3517     if (ParsingPreprocessorDirective) {
3518       // Done parsing the "line".
3519       ParsingPreprocessorDirective = false;
3520 
3521       // Restore comment saving mode, in case it was disabled for directive.
3522       if (PP)
3523         resetExtendedTokenMode();
3524 
3525       // Since we consumed a newline, we are back at the start of a line.
3526       IsAtStartOfLine = true;
3527       IsAtPhysicalStartOfLine = true;
3528       NewLinePtr = CurPtr - 1;
3529 
3530       Kind = tok::eod;
3531       break;
3532     }
3533 
3534     // No leading whitespace seen so far.
3535     Result.clearFlag(Token::LeadingSpace);
3536 
3537     if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3538       return true; // KeepWhitespaceMode
3539 
3540     // We only saw whitespace, so just try again with this lexer.
3541     // (We manually eliminate the tail call to avoid recursion.)
3542     goto LexNextToken;
3543   case ' ':
3544   case '\t':
3545   case '\f':
3546   case '\v':
3547   SkipHorizontalWhitespace:
3548     Result.setFlag(Token::LeadingSpace);
3549     if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3550       return true; // KeepWhitespaceMode
3551 
3552   SkipIgnoredUnits:
3553     CurPtr = BufferPtr;
3554 
3555     // If the next token is obviously a // or /* */ comment, skip it efficiently
3556     // too (without going through the big switch stmt).
3557     if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
3558         LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
3559       if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
3560         return true; // There is a token to return.
3561       goto SkipIgnoredUnits;
3562     } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
3563       if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
3564         return true; // There is a token to return.
3565       goto SkipIgnoredUnits;
3566     } else if (isHorizontalWhitespace(*CurPtr)) {
3567       goto SkipHorizontalWhitespace;
3568     }
3569     // We only saw whitespace, so just try again with this lexer.
3570     // (We manually eliminate the tail call to avoid recursion.)
3571     goto LexNextToken;
3572 
3573   // C99 6.4.4.1: Integer Constants.
3574   // C99 6.4.4.2: Floating Constants.
3575   case '0': case '1': case '2': case '3': case '4':
3576   case '5': case '6': case '7': case '8': case '9':
3577     // Notify MIOpt that we read a non-whitespace/non-comment token.
3578     MIOpt.ReadToken();
3579     return LexNumericConstant(Result, CurPtr);
3580 
3581   // Identifier (e.g., uber), or
3582   // UTF-8 (C2x/C++17) or UTF-16 (C11/C++11) character literal, or
3583   // UTF-8 or UTF-16 string literal (C11/C++11).
3584   case 'u':
3585     // Notify MIOpt that we read a non-whitespace/non-comment token.
3586     MIOpt.ReadToken();
3587 
3588     if (LangOpts.CPlusPlus11 || LangOpts.C11) {
3589       Char = getCharAndSize(CurPtr, SizeTmp);
3590 
3591       // UTF-16 string literal
3592       if (Char == '"')
3593         return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3594                                 tok::utf16_string_literal);
3595 
3596       // UTF-16 character constant
3597       if (Char == '\'')
3598         return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3599                                tok::utf16_char_constant);
3600 
3601       // UTF-16 raw string literal
3602       if (Char == 'R' && LangOpts.CPlusPlus11 &&
3603           getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3604         return LexRawStringLiteral(Result,
3605                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3606                                            SizeTmp2, Result),
3607                                tok::utf16_string_literal);
3608 
3609       if (Char == '8') {
3610         char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);
3611 
3612         // UTF-8 string literal
3613         if (Char2 == '"')
3614           return LexStringLiteral(Result,
3615                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3616                                            SizeTmp2, Result),
3617                                tok::utf8_string_literal);
3618         if (Char2 == '\'' && (LangOpts.CPlusPlus17 || LangOpts.C2x))
3619           return LexCharConstant(
3620               Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3621                                   SizeTmp2, Result),
3622               tok::utf8_char_constant);
3623 
3624         if (Char2 == 'R' && LangOpts.CPlusPlus11) {
3625           unsigned SizeTmp3;
3626           char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
3627           // UTF-8 raw string literal
3628           if (Char3 == '"') {
3629             return LexRawStringLiteral(Result,
3630                    ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3631                                            SizeTmp2, Result),
3632                                SizeTmp3, Result),
3633                    tok::utf8_string_literal);
3634           }
3635         }
3636       }
3637     }
3638 
3639     // treat u like the start of an identifier.
3640     return LexIdentifierContinue(Result, CurPtr);
3641 
3642   case 'U': // Identifier (e.g. Uber) or C11/C++11 UTF-32 string literal
3643     // Notify MIOpt that we read a non-whitespace/non-comment token.
3644     MIOpt.ReadToken();
3645 
3646     if (LangOpts.CPlusPlus11 || LangOpts.C11) {
3647       Char = getCharAndSize(CurPtr, SizeTmp);
3648 
3649       // UTF-32 string literal
3650       if (Char == '"')
3651         return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3652                                 tok::utf32_string_literal);
3653 
3654       // UTF-32 character constant
3655       if (Char == '\'')
3656         return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3657                                tok::utf32_char_constant);
3658 
3659       // UTF-32 raw string literal
3660       if (Char == 'R' && LangOpts.CPlusPlus11 &&
3661           getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3662         return LexRawStringLiteral(Result,
3663                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3664                                            SizeTmp2, Result),
3665                                tok::utf32_string_literal);
3666     }
3667 
3668     // treat U like the start of an identifier.
3669     return LexIdentifierContinue(Result, CurPtr);
3670 
3671   case 'R': // Identifier or C++0x raw string literal
3672     // Notify MIOpt that we read a non-whitespace/non-comment token.
3673     MIOpt.ReadToken();
3674 
3675     if (LangOpts.CPlusPlus11) {
3676       Char = getCharAndSize(CurPtr, SizeTmp);
3677 
3678       if (Char == '"')
3679         return LexRawStringLiteral(Result,
3680                                    ConsumeChar(CurPtr, SizeTmp, Result),
3681                                    tok::string_literal);
3682     }
3683 
3684     // treat R like the start of an identifier.
3685     return LexIdentifierContinue(Result, CurPtr);
3686 
3687   case 'L':   // Identifier (Loony) or wide literal (L'x' or L"xyz").
3688     // Notify MIOpt that we read a non-whitespace/non-comment token.
3689     MIOpt.ReadToken();
3690     Char = getCharAndSize(CurPtr, SizeTmp);
3691 
3692     // Wide string literal.
3693     if (Char == '"')
3694       return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3695                               tok::wide_string_literal);
3696 
3697     // Wide raw string literal.
3698     if (LangOpts.CPlusPlus11 && Char == 'R' &&
3699         getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3700       return LexRawStringLiteral(Result,
3701                                ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3702                                            SizeTmp2, Result),
3703                                tok::wide_string_literal);
3704 
3705     // Wide character constant.
3706     if (Char == '\'')
3707       return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3708                              tok::wide_char_constant);
3709     // FALL THROUGH, treating L like the start of an identifier.
3710     LLVM_FALLTHROUGH;
3711 
3712   // C99 6.4.2: Identifiers.
3713   case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
3714   case 'H': case 'I': case 'J': case 'K':    /*'L'*/case 'M': case 'N':
3715   case 'O': case 'P': case 'Q':    /*'R'*/case 'S': case 'T':    /*'U'*/
3716   case 'V': case 'W': case 'X': case 'Y': case 'Z':
3717   case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
3718   case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
3719   case 'o': case 'p': case 'q': case 'r': case 's': case 't':    /*'u'*/
3720   case 'v': case 'w': case 'x': case 'y': case 'z':
3721   case '_':
3722     // Notify MIOpt that we read a non-whitespace/non-comment token.
3723     MIOpt.ReadToken();
3724     return LexIdentifierContinue(Result, CurPtr);
3725 
3726   case '$':   // $ in identifiers.
3727     if (LangOpts.DollarIdents) {
3728       if (!isLexingRawMode())
3729         Diag(CurPtr-1, diag::ext_dollar_in_identifier);
3730       // Notify MIOpt that we read a non-whitespace/non-comment token.
3731       MIOpt.ReadToken();
3732       return LexIdentifierContinue(Result, CurPtr);
3733     }
3734 
3735     Kind = tok::unknown;
3736     break;
3737 
3738   // C99 6.4.4: Character Constants.
3739   case '\'':
3740     // Notify MIOpt that we read a non-whitespace/non-comment token.
3741     MIOpt.ReadToken();
3742     return LexCharConstant(Result, CurPtr, tok::char_constant);
3743 
3744   // C99 6.4.5: String Literals.
3745   case '"':
3746     // Notify MIOpt that we read a non-whitespace/non-comment token.
3747     MIOpt.ReadToken();
3748     return LexStringLiteral(Result, CurPtr,
3749                             ParsingFilename ? tok::header_name
3750                                             : tok::string_literal);
3751 
3752   // C99 6.4.6: Punctuators.
3753   case '?':
3754     Kind = tok::question;
3755     break;
3756   case '[':
3757     Kind = tok::l_square;
3758     break;
3759   case ']':
3760     Kind = tok::r_square;
3761     break;
3762   case '(':
3763     Kind = tok::l_paren;
3764     break;
3765   case ')':
3766     Kind = tok::r_paren;
3767     break;
3768   case '{':
3769     Kind = tok::l_brace;
3770     break;
3771   case '}':
3772     Kind = tok::r_brace;
3773     break;
3774   case '.':
3775     Char = getCharAndSize(CurPtr, SizeTmp);
3776     if (Char >= '0' && Char <= '9') {
3777       // Notify MIOpt that we read a non-whitespace/non-comment token.
3778       MIOpt.ReadToken();
3779 
3780       return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
3781     } else if (LangOpts.CPlusPlus && Char == '*') {
3782       Kind = tok::periodstar;
3783       CurPtr += SizeTmp;
3784     } else if (Char == '.' &&
3785                getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
3786       Kind = tok::ellipsis;
3787       CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3788                            SizeTmp2, Result);
3789     } else {
3790       Kind = tok::period;
3791     }
3792     break;
3793   case '&':
3794     Char = getCharAndSize(CurPtr, SizeTmp);
3795     if (Char == '&') {
3796       Kind = tok::ampamp;
3797       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3798     } else if (Char == '=') {
3799       Kind = tok::ampequal;
3800       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3801     } else {
3802       Kind = tok::amp;
3803     }
3804     break;
3805   case '*':
3806     if (getCharAndSize(CurPtr, SizeTmp) == '=') {
3807       Kind = tok::starequal;
3808       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3809     } else {
3810       Kind = tok::star;
3811     }
3812     break;
3813   case '+':
3814     Char = getCharAndSize(CurPtr, SizeTmp);
3815     if (Char == '+') {
3816       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3817       Kind = tok::plusplus;
3818     } else if (Char == '=') {
3819       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3820       Kind = tok::plusequal;
3821     } else {
3822       Kind = tok::plus;
3823     }
3824     break;
3825   case '-':
3826     Char = getCharAndSize(CurPtr, SizeTmp);
3827     if (Char == '-') {      // --
3828       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3829       Kind = tok::minusminus;
3830     } else if (Char == '>' && LangOpts.CPlusPlus &&
3831                getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {  // C++ ->*
3832       CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3833                            SizeTmp2, Result);
3834       Kind = tok::arrowstar;
3835     } else if (Char == '>') {   // ->
3836       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3837       Kind = tok::arrow;
3838     } else if (Char == '=') {   // -=
3839       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3840       Kind = tok::minusequal;
3841     } else {
3842       Kind = tok::minus;
3843     }
3844     break;
3845   case '~':
3846     Kind = tok::tilde;
3847     break;
3848   case '!':
3849     if (getCharAndSize(CurPtr, SizeTmp) == '=') {
3850       Kind = tok::exclaimequal;
3851       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3852     } else {
3853       Kind = tok::exclaim;
3854     }
3855     break;
3856   case '/':
3857     // 6.4.9: Comments
3858     Char = getCharAndSize(CurPtr, SizeTmp);
3859     if (Char == '/') {         // Line comment.
3860       // Even if Line comments are disabled (e.g. in C89 mode), we generally
3861       // want to lex this as a comment.  There is one problem with this though,
3862       // that in one particular corner case, this can change the behavior of the
3863       // resultant program.  For example, In  "foo //**/ bar", C89 would lex
3864       // this as "foo / bar" and languages with Line comments would lex it as
3865       // "foo".  Check to see if the character after the second slash is a '*'.
3866       // If so, we will lex that as a "/" instead of the start of a comment.
3867       // However, we never do this if we are just preprocessing.
3868       bool TreatAsComment =
3869           LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
3870       if (!TreatAsComment)
3871         if (!(PP && PP->isPreprocessedOutput()))
3872           TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';
3873 
3874       if (TreatAsComment) {
3875         if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3876                             TokAtPhysicalStartOfLine))
3877           return true; // There is a token to return.
3878 
3879         // It is common for the tokens immediately after a // comment to be
3880         // whitespace (indentation for the next line).  Instead of going through
3881         // the big switch, handle it efficiently now.
3882         goto SkipIgnoredUnits;
3883       }
3884     }
3885 
3886     if (Char == '*') {  // /**/ comment.
3887       if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3888                            TokAtPhysicalStartOfLine))
3889         return true; // There is a token to return.
3890 
3891       // We only saw whitespace, so just try again with this lexer.
3892       // (We manually eliminate the tail call to avoid recursion.)
3893       goto LexNextToken;
3894     }
3895 
3896     if (Char == '=') {
3897       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3898       Kind = tok::slashequal;
3899     } else {
3900       Kind = tok::slash;
3901     }
3902     break;
3903   case '%':
3904     Char = getCharAndSize(CurPtr, SizeTmp);
3905     if (Char == '=') {
3906       Kind = tok::percentequal;
3907       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3908     } else if (LangOpts.Digraphs && Char == '>') {
3909       Kind = tok::r_brace;                             // '%>' -> '}'
3910       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3911     } else if (LangOpts.Digraphs && Char == ':') {
3912       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3913       Char = getCharAndSize(CurPtr, SizeTmp);
3914       if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
3915         Kind = tok::hashhash;                          // '%:%:' -> '##'
3916         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3917                              SizeTmp2, Result);
3918       } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize
3919         CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3920         if (!isLexingRawMode())
3921           Diag(BufferPtr, diag::ext_charize_microsoft);
3922         Kind = tok::hashat;
3923       } else {                                         // '%:' -> '#'
3924         // We parsed a # character.  If this occurs at the start of the line,
3925         // it's actually the start of a preprocessing directive.  Callback to
3926         // the preprocessor to handle it.
3927         // TODO: -fpreprocessed mode??
3928         if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
3929           goto HandleDirective;
3930 
3931         Kind = tok::hash;
3932       }
3933     } else {
3934       Kind = tok::percent;
3935     }
3936     break;
3937   case '<':
3938     Char = getCharAndSize(CurPtr, SizeTmp);
3939     if (ParsingFilename) {
3940       return LexAngledStringLiteral(Result, CurPtr);
3941     } else if (Char == '<') {
3942       char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
3943       if (After == '=') {
3944         Kind = tok::lesslessequal;
3945         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3946                              SizeTmp2, Result);
3947       } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
3948         // If this is actually a '<<<<<<<' version control conflict marker,
3949         // recognize it as such and recover nicely.
3950         goto LexNextToken;
3951       } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
3952         // If this is '<<<<' and we're in a Perforce-style conflict marker,
3953         // ignore it.
3954         goto LexNextToken;
3955       } else if (LangOpts.CUDA && After == '<') {
3956         Kind = tok::lesslessless;
3957         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3958                              SizeTmp2, Result);
3959       } else {
3960         CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3961         Kind = tok::lessless;
3962       }
3963     } else if (Char == '=') {
3964       char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
3965       if (After == '>') {
3966         if (LangOpts.CPlusPlus20) {
3967           if (!isLexingRawMode())
3968             Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
3969           CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3970                                SizeTmp2, Result);
3971           Kind = tok::spaceship;
3972           break;
3973         }
3974         // Suggest adding a space between the '<=' and the '>' to avoid a
3975         // change in semantics if this turns up in C++ <=17 mode.
3976         if (LangOpts.CPlusPlus && !isLexingRawMode()) {
3977           Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
3978             << FixItHint::CreateInsertion(
3979                    getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
3980         }
3981       }
3982       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3983       Kind = tok::lessequal;
3984     } else if (LangOpts.Digraphs && Char == ':') {     // '<:' -> '['
3985       if (LangOpts.CPlusPlus11 &&
3986           getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
3987         // C++0x [lex.pptoken]p3:
3988         //  Otherwise, if the next three characters are <:: and the subsequent
3989         //  character is neither : nor >, the < is treated as a preprocessor
3990         //  token by itself and not as the first character of the alternative
3991         //  token <:.
3992         unsigned SizeTmp3;
3993         char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
3994         if (After != ':' && After != '>') {
3995           Kind = tok::less;
3996           if (!isLexingRawMode())
3997             Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
3998           break;
3999         }
4000       }
4001 
4002       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4003       Kind = tok::l_square;
4004     } else if (LangOpts.Digraphs && Char == '%') {     // '<%' -> '{'
4005       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4006       Kind = tok::l_brace;
4007     } else if (Char == '#' && /*Not a trigraph*/ SizeTmp == 1 &&
4008                lexEditorPlaceholder(Result, CurPtr)) {
4009       return true;
4010     } else {
4011       Kind = tok::less;
4012     }
4013     break;
4014   case '>':
4015     Char = getCharAndSize(CurPtr, SizeTmp);
4016     if (Char == '=') {
4017       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4018       Kind = tok::greaterequal;
4019     } else if (Char == '>') {
4020       char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
4021       if (After == '=') {
4022         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
4023                              SizeTmp2, Result);
4024         Kind = tok::greatergreaterequal;
4025       } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
4026         // If this is actually a '>>>>' conflict marker, recognize it as such
4027         // and recover nicely.
4028         goto LexNextToken;
4029       } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
4030         // If this is '>>>>>>>' and we're in a conflict marker, ignore it.
4031         goto LexNextToken;
4032       } else if (LangOpts.CUDA && After == '>') {
4033         Kind = tok::greatergreatergreater;
4034         CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
4035                              SizeTmp2, Result);
4036       } else {
4037         CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4038         Kind = tok::greatergreater;
4039       }
4040     } else {
4041       Kind = tok::greater;
4042     }
4043     break;
4044   case '^':
4045     Char = getCharAndSize(CurPtr, SizeTmp);
4046     if (Char == '=') {
4047       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4048       Kind = tok::caretequal;
4049     } else if (LangOpts.OpenCL && Char == '^') {
4050       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4051       Kind = tok::caretcaret;
4052     } else {
4053       Kind = tok::caret;
4054     }
4055     break;
4056   case '|':
4057     Char = getCharAndSize(CurPtr, SizeTmp);
4058     if (Char == '=') {
4059       Kind = tok::pipeequal;
4060       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4061     } else if (Char == '|') {
4062       // If this is '|||||||' and we're in a conflict marker, ignore it.
4063       if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
4064         goto LexNextToken;
4065       Kind = tok::pipepipe;
4066       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4067     } else {
4068       Kind = tok::pipe;
4069     }
4070     break;
4071   case ':':
4072     Char = getCharAndSize(CurPtr, SizeTmp);
4073     if (LangOpts.Digraphs && Char == '>') {
4074       Kind = tok::r_square; // ':>' -> ']'
4075       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4076     } else if ((LangOpts.CPlusPlus ||
4077                 LangOpts.DoubleSquareBracketAttributes) &&
4078                Char == ':') {
4079       Kind = tok::coloncolon;
4080       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4081     } else {
4082       Kind = tok::colon;
4083     }
4084     break;
4085   case ';':
4086     Kind = tok::semi;
4087     break;
4088   case '=':
4089     Char = getCharAndSize(CurPtr, SizeTmp);
4090     if (Char == '=') {
4091       // If this is '====' and we're in a conflict marker, ignore it.
4092       if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
4093         goto LexNextToken;
4094 
4095       Kind = tok::equalequal;
4096       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4097     } else {
4098       Kind = tok::equal;
4099     }
4100     break;
4101   case ',':
4102     Kind = tok::comma;
4103     break;
4104   case '#':
4105     Char = getCharAndSize(CurPtr, SizeTmp);
4106     if (Char == '#') {
4107       Kind = tok::hashhash;
4108       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4109     } else if (Char == '@' && LangOpts.MicrosoftExt) {  // #@ -> Charize
4110       Kind = tok::hashat;
4111       if (!isLexingRawMode())
4112         Diag(BufferPtr, diag::ext_charize_microsoft);
4113       CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
4114     } else {
4115       // We parsed a # character.  If this occurs at the start of the line,
4116       // it's actually the start of a preprocessing directive.  Callback to
4117       // the preprocessor to handle it.
4118       // TODO: -fpreprocessed mode??
4119       if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
4120         goto HandleDirective;
4121 
4122       Kind = tok::hash;
4123     }
4124     break;
4125 
4126   case '@':
4127     // Objective C support.
4128     if (CurPtr[-1] == '@' && LangOpts.ObjC)
4129       Kind = tok::at;
4130     else
4131       Kind = tok::unknown;
4132     break;
4133 
4134   // UCNs (C99 6.4.3, C++11 [lex.charset]p2)
4135   case '\\':
4136     if (!LangOpts.AsmPreprocessor) {
4137       if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) {
4138         if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
4139           if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
4140             return true; // KeepWhitespaceMode
4141 
4142           // We only saw whitespace, so just try again with this lexer.
4143           // (We manually eliminate the tail call to avoid recursion.)
4144           goto LexNextToken;
4145         }
4146 
4147         return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
4148       }
4149     }
4150 
4151     Kind = tok::unknown;
4152     break;
4153 
4154   default: {
4155     if (isASCII(Char)) {
4156       Kind = tok::unknown;
4157       break;
4158     }
4159 
4160     llvm::UTF32 CodePoint;
4161 
4162     // We can't just reset CurPtr to BufferPtr because BufferPtr may point to
4163     // an escaped newline.
4164     --CurPtr;
4165     llvm::ConversionResult Status =
4166         llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
4167                                   (const llvm::UTF8 *)BufferEnd,
4168                                   &CodePoint,
4169                                   llvm::strictConversion);
4170     if (Status == llvm::conversionOK) {
4171       if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
4172         if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
4173           return true; // KeepWhitespaceMode
4174 
4175         // We only saw whitespace, so just try again with this lexer.
4176         // (We manually eliminate the tail call to avoid recursion.)
4177         goto LexNextToken;
4178       }
4179       return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
4180     }
4181 
4182     if (isLexingRawMode() || ParsingPreprocessorDirective ||
4183         PP->isPreprocessedOutput()) {
4184       ++CurPtr;
4185       Kind = tok::unknown;
4186       break;
4187     }
4188 
4189     // Non-ASCII characters tend to creep into source code unintentionally.
4190     // Instead of letting the parser complain about the unknown token,
4191     // just diagnose the invalid UTF-8, then drop the character.
4192     Diag(CurPtr, diag::err_invalid_utf8);
4193 
4194     BufferPtr = CurPtr+1;
4195     // We're pretending the character didn't exist, so just try again with
4196     // this lexer.
4197     // (We manually eliminate the tail call to avoid recursion.)
4198     goto LexNextToken;
4199   }
4200   }
4201 
4202   // Notify MIOpt that we read a non-whitespace/non-comment token.
4203   MIOpt.ReadToken();
4204 
4205   // Update the location of token as well as BufferPtr.
4206   FormTokenWithChars(Result, CurPtr, Kind);
4207   return true;
4208 
4209 HandleDirective:
4210   // We parsed a # character and it's the start of a preprocessing directive.
4211 
4212   FormTokenWithChars(Result, CurPtr, tok::hash);
4213   PP->HandleDirective(Result);
4214 
4215   if (PP->hadModuleLoaderFatalFailure()) {
4216     // With a fatal failure in the module loader, we abort parsing.
4217     assert(Result.is(tok::eof) && "Preprocessor did not set tok:eof");
4218     return true;
4219   }
4220 
4221   // We parsed the directive; lex a token with the new state.
4222   return false;
4223 }
4224 
4225 const char *Lexer::convertDependencyDirectiveToken(
4226     const dependency_directives_scan::Token &DDTok, Token &Result) {
4227   const char *TokPtr = BufferStart + DDTok.Offset;
4228   Result.startToken();
4229   Result.setLocation(getSourceLocation(TokPtr));
4230   Result.setKind(DDTok.Kind);
4231   Result.setFlag((Token::TokenFlags)DDTok.Flags);
4232   Result.setLength(DDTok.Length);
4233   BufferPtr = TokPtr + DDTok.Length;
4234   return TokPtr;
4235 }
4236 
bool Lexer::LexDependencyDirectiveToken(Token &Result) {
  assert(isDependencyDirectivesLexer());

  using namespace dependency_directives_scan;

  // Advance to the next directive that still has unread tokens.  If we run
  // off the end (an 'eof' directive), lex the end-of-file token instead.
  while (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size()) {
    if (DepDirectives.front().Kind == pp_eof)
      return LexEndOfFile(Result, BufferEnd);
    NextDepDirectiveTokenIndex = 0;
    DepDirectives = DepDirectives.drop_front();
  }

  const dependency_directives_scan::Token &DDTok =
      DepDirectives.front().Tokens[NextDepDirectiveTokenIndex++];
  if (NextDepDirectiveTokenIndex > 1 || DDTok.Kind != tok::hash) {
    // Read something other than a preprocessor directive hash.
    MIOpt.ReadToken();
  }

  // Convert the scanned token into 'Result' and remember where its spelling
  // starts, since some token kinds below need to attach that pointer.
  const char *TokPtr = convertDependencyDirectiveToken(DDTok, Result);

  // A '#' at the start of a line begins a preprocessing directive; hand it
  // to the preprocessor and report that no token was produced (return false).
  if (Result.is(tok::hash) && Result.isAtStartOfLine()) {
    PP->HandleDirective(Result);
    return false;
  }
  if (Result.is(tok::raw_identifier)) {
    Result.setRawIdentifierData(TokPtr);
    if (!isLexingRawMode()) {
      // Resolve the identifier; the preprocessor may need to handle it
      // specially (e.g. macro expansion), in which case it decides whether
      // a token is returned to the caller.
      IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
      if (II->isHandleIdentifierCase())
        return PP->HandleIdentifier(Result);
    }
    return true;
  }
  if (Result.isLiteral()) {
    // Literals carry a pointer to their spelling in the buffer.
    Result.setLiteralData(TokPtr);
    return true;
  }
  if (Result.is(tok::colon) &&
      (LangOpts.CPlusPlus || LangOpts.DoubleSquareBracketAttributes)) {
    // Convert consecutive colons to 'tok::coloncolon'.
    if (*BufferPtr == ':') {
      assert(DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
          tok::colon));
      ++NextDepDirectiveTokenIndex;
      Result.setKind(tok::coloncolon);
    }
    return true;
  }
  // End-of-directive terminates preprocessor-directive parsing mode.
  if (Result.is(tok::eod))
    ParsingPreprocessorDirective = false;

  return true;
}
4291 
4292 bool Lexer::LexDependencyDirectiveTokenWhileSkipping(Token &Result) {
4293   assert(isDependencyDirectivesLexer());
4294 
4295   using namespace dependency_directives_scan;
4296 
4297   bool Stop = false;
4298   unsigned NestedIfs = 0;
4299   do {
4300     DepDirectives = DepDirectives.drop_front();
4301     switch (DepDirectives.front().Kind) {
4302     case pp_none:
4303       llvm_unreachable("unexpected 'pp_none'");
4304     case pp_include:
4305     case pp___include_macros:
4306     case pp_define:
4307     case pp_undef:
4308     case pp_import:
4309     case pp_pragma_import:
4310     case pp_pragma_once:
4311     case pp_pragma_push_macro:
4312     case pp_pragma_pop_macro:
4313     case pp_pragma_include_alias:
4314     case pp_include_next:
4315     case decl_at_import:
4316     case cxx_module_decl:
4317     case cxx_import_decl:
4318     case cxx_export_module_decl:
4319     case cxx_export_import_decl:
4320       break;
4321     case pp_if:
4322     case pp_ifdef:
4323     case pp_ifndef:
4324       ++NestedIfs;
4325       break;
4326     case pp_elif:
4327     case pp_elifdef:
4328     case pp_elifndef:
4329     case pp_else:
4330       if (!NestedIfs) {
4331         Stop = true;
4332       }
4333       break;
4334     case pp_endif:
4335       if (!NestedIfs) {
4336         Stop = true;
4337       } else {
4338         --NestedIfs;
4339       }
4340       break;
4341     case pp_eof:
4342       NextDepDirectiveTokenIndex = 0;
4343       return LexEndOfFile(Result, BufferEnd);
4344     }
4345   } while (!Stop);
4346 
4347   const dependency_directives_scan::Token &DDTok =
4348       DepDirectives.front().Tokens.front();
4349   assert(DDTok.is(tok::hash));
4350   NextDepDirectiveTokenIndex = 1;
4351 
4352   convertDependencyDirectiveToken(DDTok, Result);
4353   return false;
4354 }
4355