# -*- coding: utf-8 -*-
"""
    Basic CLexer Test
    ~~~~~~~~~~~~~~~~~

    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import textwrap

import pytest

from pygments.token import Text, Number, Token
from pygments.lexers import CLexer


# One CLexer instance is shared by all tests: tokenizer state is local to
# each get_tokens() call, so reuse across tests is safe.
@pytest.fixture(scope='module')
def lexer():
    yield CLexer()


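# Each test feeds a small C fragment to CLexer.get_tokens() and compares
# the result with an expected list of (token type, text) pairs.  Note that
# get_tokens() guarantees the stream ends with a newline, so every
# expected list ends in a '\n' token.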
def test_numbers(lexer):
    code = '42 23.42 23. .42 023 0xdeadbeef 23e+42 42e-23'
    wanted = []
    # Pair each literal with its expected token type, with a space token
    # as separator.
    for item in zip([Number.Integer, Number.Float, Number.Float,
                     Number.Float, Number.Oct, Number.Hex,
                     Number.Float, Number.Float], code.split()):
        wanted.append(item)
        wanted.append((Text, ' '))
    # Drop the bogus separator after the last literal and append the
    # newline that get_tokens() adds to unterminated input.
    wanted = wanted[:-1] + [(Text, '\n')]
    assert list(lexer.get_tokens(code)) == wanted


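# 'switch' statements: 'switch', 'case' and 'default' must come out as
# keywords, and the colon after a case label as an operator.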
def test_switch(lexer):
    fragment = '''\
    int main()
    {
        switch (0)
        {
            case 0:
            default:
                ;
        }
    }
    '''
    tokens = [
        (Token.Keyword.Type, 'int'),
        (Token.Text, ' '),
        (Token.Name.Function, 'main'),
        (Token.Punctuation, '('),
        (Token.Punctuation, ')'),
        (Token.Text, '\n'),
        (Token.Punctuation, '{'),
        (Token.Text, '\n'),
        (Token.Text, '    '),
        (Token.Keyword, 'switch'),
        (Token.Text, ' '),
        (Token.Punctuation, '('),
        (Token.Literal.Number.Integer, '0'),
        (Token.Punctuation, ')'),
        (Token.Text, '\n'),
        (Token.Text, '    '),
        (Token.Punctuation, '{'),
        (Token.Text, '\n'),
        (Token.Text, '        '),
        (Token.Keyword, 'case'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '0'),
        (Token.Operator, ':'),
        (Token.Text, '\n'),
        (Token.Text, '        '),
        (Token.Keyword, 'default'),
        (Token.Operator, ':'),
        (Token.Text, '\n'),
        (Token.Text, '            '),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
        (Token.Text, '    '),
        (Token.Punctuation, '}'),
        (Token.Text, '\n'),
        (Token.Punctuation, '}'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens


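# The same switch, now with a space before each colon: the space must be
# lexed as plain Text without disturbing the keyword and operator tokens.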
def test_switch_space_before_colon(lexer):
    fragment = '''\
    int main()
    {
        switch (0)
        {
            case 0 :
            default :
                ;
        }
    }
    '''
    tokens = [
        (Token.Keyword.Type, 'int'),
        (Token.Text, ' '),
        (Token.Name.Function, 'main'),
        (Token.Punctuation, '('),
        (Token.Punctuation, ')'),
        (Token.Text, '\n'),
        (Token.Punctuation, '{'),
        (Token.Text, '\n'),
        (Token.Text, '    '),
        (Token.Keyword, 'switch'),
        (Token.Text, ' '),
        (Token.Punctuation, '('),
        (Token.Literal.Number.Integer, '0'),
        (Token.Punctuation, ')'),
        (Token.Text, '\n'),
        (Token.Text, '    '),
        (Token.Punctuation, '{'),
        (Token.Text, '\n'),
        (Token.Text, '        '),
        (Token.Keyword, 'case'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '0'),
        (Token.Text, ' '),
        (Token.Operator, ':'),
        (Token.Text, '\n'),
        (Token.Text, '        '),
        (Token.Keyword, 'default'),
        (Token.Text, ' '),
        (Token.Operator, ':'),
        (Token.Text, '\n'),
        (Token.Text, '            '),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
        (Token.Text, '    '),
        (Token.Punctuation, '}'),
        (Token.Text, '\n'),
        (Token.Punctuation, '}'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens


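# Labels: an identifier followed by a colon at statement position is
# Name.Label (with the colon as punctuation), while the same identifier
# after 'goto' stays a plain Name.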
def test_label(lexer):
    fragment = '''\
    int main()
    {
    foo:
      goto foo;
    }
    '''
    tokens = [
        (Token.Keyword.Type, 'int'),
        (Token.Text, ' '),
        (Token.Name.Function, 'main'),
        (Token.Punctuation, '('),
        (Token.Punctuation, ')'),
        (Token.Text, '\n'),
        (Token.Punctuation, '{'),
        (Token.Text, '\n'),
        (Token.Name.Label, 'foo'),
        (Token.Punctuation, ':'),
        (Token.Text, '\n'),
        (Token.Text, '  '),
        (Token.Keyword, 'goto'),
        (Token.Text, ' '),
        (Token.Name, 'foo'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
        (Token.Punctuation, '}'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens


def test_label_space_before_colon(lexer):
    fragment = '''\
    int main()
    {
    foo :
      goto foo;
    }
    '''
    tokens = [
        (Token.Keyword.Type, 'int'),
        (Token.Text, ' '),
        (Token.Name.Function, 'main'),
        (Token.Punctuation, '('),
        (Token.Punctuation, ')'),
        (Token.Text, '\n'),
        (Token.Punctuation, '{'),
        (Token.Text, '\n'),
        (Token.Name.Label, 'foo'),
        (Token.Text, ' '),
        (Token.Punctuation, ':'),
        (Token.Text, '\n'),
        (Token.Text, '  '),
        (Token.Keyword, 'goto'),
        (Token.Text, ' '),
        (Token.Name, 'foo'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
        (Token.Punctuation, '}'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens


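# A label may be followed by a statement on the same line: 'foo:return 0;'
# must still split into label, colon and return statement.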
def test_label_followed_by_statement(lexer):
    fragment = '''\
    int main()
    {
    foo:return 0;
      goto foo;
    }
    '''
    tokens = [
        (Token.Keyword.Type, 'int'),
        (Token.Text, ' '),
        (Token.Name.Function, 'main'),
        (Token.Punctuation, '('),
        (Token.Punctuation, ')'),
        (Token.Text, '\n'),
        (Token.Punctuation, '{'),
        (Token.Text, '\n'),
        (Token.Name.Label, 'foo'),
        (Token.Punctuation, ':'),
        (Token.Keyword, 'return'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '0'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
        (Token.Text, '  '),
        (Token.Keyword, 'goto'),
        (Token.Text, ' '),
        (Token.Name, 'foo'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
        (Token.Punctuation, '}'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens


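# Preprocessor directives: '#' and the directive name are Comment.Preproc,
# and the include target is Comment.PreprocFile, whether written in angle
# brackets or double quotes.  Even the terminating newline stays
# Comment.Preproc rather than Text.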
def test_preproc_file(lexer):
    fragment = '#include <foo>\n'
    tokens = [
        (Token.Comment.Preproc, '#'),
        (Token.Comment.Preproc, 'include'),
        (Token.Text, ' '),
        (Token.Comment.PreprocFile, '<foo>'),
        (Token.Comment.Preproc, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens


def test_preproc_file2(lexer):
    fragment = '#include "foo.h"\n'
    tokens = [
        (Token.Comment.Preproc, '#'),
        (Token.Comment.Preproc, 'include'),
        (Token.Text, ' '),
        (Token.Comment.PreprocFile, '"foo.h"'),
        (Token.Comment.Preproc, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
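

# The two include tests above could equally be written as one parametrized
# test.  A minimal sketch, not part of the original suite; the expected
# tokens are copied verbatim from test_preproc_file and test_preproc_file2:
@pytest.mark.parametrize('header', ['<foo>', '"foo.h"'])
def test_preproc_file_parametrized(lexer, header):
    fragment = '#include %s\n' % header
    tokens = [
        (Token.Comment.Preproc, '#'),
        (Token.Comment.Preproc, 'include'),
        (Token.Text, ' '),
        (Token.Comment.PreprocFile, header),
        (Token.Comment.Preproc, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens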