
Searched refs:tokenizers (Results 1 – 25 of 254) sorted by relevance


/dports/converters/wkhtmltopdf/qt-5db36ec/tests/auto/qtokenautomaton/
qtokenautomaton.pro
4 tokenizers/basic/basic.cpp \
5 tokenizers/basicNamespace/basicNamespace.cpp \
6 tokenizers/boilerplate/boilerplate.cpp \
7 tokenizers/noNamespace/noNamespace.cpp \
8 tokenizers/noToString/noToString.cpp \
9 tokenizers/withNamespace/withNamespace.cpp
11 HEADERS += tokenizers/basic/basic.h \
12 tokenizers/basicNamespace/basicNamespace.h \
13 tokenizers/boilerplate/boilerplate.h \
14 tokenizers/noNamespace/noNamespace.h \
[all …]
generateTokenizers.sh
43 tokenizers=`find tokenizers/* -type d`
44 for tokenizer in $tokenizers; do
/dports/math/vtk9/VTK-9.1.0/Infovis/BoostGraphAlgorithms/
vtkBoostSplitTableField.cxx
39 static void GenerateRows(const tokenizers_t& tokenizers, const unsigned int column_index, in GenerateRows() argument
42 if (column_index == tokenizers.size()) in GenerateRows()
48 tokenizer_t* const tokenizer = tokenizers[column_index]; in GenerateRows()
58 GenerateRows(tokenizers, column_index + 1, input_row, output_row, output_table); in GenerateRows()
64 GenerateRows(tokenizers, column_index + 1, input_row, output_row, output_table); in GenerateRows()
140 implementation::tokenizers_t tokenizers; in RequestData() local
143 tokenizers.push_back(static_cast<implementation::tokenizer_t*>(nullptr)); in RequestData()
149 tokenizers[column] = new implementation::tokenizer_t( in RequestData()
163 implementation::GenerateRows(tokenizers, 0, input_row, output_row, output); in RequestData()
172 for (implementation::tokenizers_t::iterator tokenizer = tokenizers.begin(); in RequestData()
[all …]
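The vtkBoostSplitTableField matches above center on a recursive GenerateRows: one tokenizer per column (nullptr for columns that are not split), with recursion over column_index emitting the cartesian product of each column's token lists. A minimal Python sketch of that expansion, with illustrative names rather than VTK's API:

```python
def generate_rows(tokenizers, column_index, input_row, output_row, output_table):
    if column_index == len(tokenizers):
        output_table.append(list(output_row))  # every column resolved: emit the row
        return
    tokenizer = tokenizers[column_index]
    if tokenizer is None:
        # column is not split: copy the value through unchanged
        output_row[column_index] = input_row[column_index]
        generate_rows(tokenizers, column_index + 1, input_row, output_row, output_table)
    else:
        # one output row per token produced from this column's value
        for token in tokenizer(input_row[column_index]):
            output_row[column_index] = token
            generate_rows(tokenizers, column_index + 1, input_row, output_row, output_table)

table = []
split_on_semicolon = lambda s: s.split(";")
generate_rows([None, split_on_semicolon], 0, ["id1", "a;b"], [None, None], table)
# table == [["id1", "a"], ["id1", "b"]]
```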
/dports/math/vtk8/VTK-8.2.0/Infovis/BoostGraphAlgorithms/
vtkBoostSplitTableField.cxx
39 …static void GenerateRows(const tokenizers_t& tokenizers, const unsigned int column_index, vtkVaria… in GenerateRows() argument
41 if(column_index == tokenizers.size()) in GenerateRows()
47 tokenizer_t* const tokenizer = tokenizers[column_index]; in GenerateRows()
57 GenerateRows(tokenizers, column_index + 1, input_row, output_row, output_table); in GenerateRows()
63 GenerateRows(tokenizers, column_index + 1, input_row, output_row, output_table); in GenerateRows()
141 implementation::tokenizers_t tokenizers; in RequestData() local
144 tokenizers.push_back(static_cast<implementation::tokenizer_t*>(0)); in RequestData()
150 tokenizers[column] = new implementation::tokenizer_t(std::string(), implementation::delimiter_t(th… in RequestData()
163 implementation::GenerateRows(tokenizers, 0, input_row, output_row, output); in RequestData()
172 …for(implementation::tokenizers_t::iterator tokenizer = tokenizers.begin(); tokenizer != tokenizers… in RequestData()
/dports/math/vtk6/VTK-6.2.0/Infovis/BoostGraphAlgorithms/
vtkBoostSplitTableField.cxx
39 …static void GenerateRows(const tokenizers_t& tokenizers, const unsigned int column_index, vtkVaria… in GenerateRows() argument
41 if(column_index == tokenizers.size()) in GenerateRows()
47 tokenizer_t* const tokenizer = tokenizers[column_index]; in GenerateRows()
57 GenerateRows(tokenizers, column_index + 1, input_row, output_row, output_table); in GenerateRows()
63 GenerateRows(tokenizers, column_index + 1, input_row, output_row, output_table); in GenerateRows()
141 implementation::tokenizers_t tokenizers; in RequestData() local
144 tokenizers.push_back(static_cast<implementation::tokenizer_t*>(0)); in RequestData()
150 tokenizers[column] = new implementation::tokenizer_t(std::string(), implementation::delimiter_t(th… in RequestData()
163 implementation::GenerateRows(tokenizers, 0, input_row, output_row, output); in RequestData()
172 …for(implementation::tokenizers_t::iterator tokenizer = tokenizers.begin(); tokenizer != tokenizers… in RequestData()
/dports/www/zola/zola-0.15.2/components/search/src/
lib.rs
67 let mut tokenizers: Vec<TokenizerFn> = vec![]; in build_tokenizers() localVariable
69 tokenizers.push(text_tokenizer); in build_tokenizers()
73 tokenizers.push(text_tokenizer); in build_tokenizers()
77 tokenizers.push(path_tokenizer); in build_tokenizers()
81 tokenizers.push(text_tokenizer); in build_tokenizers()
84 tokenizers in build_tokenizers()
139 let tokenizers = build_tokenizers(&language_options.search, language); in build_index() localVariable
148 tokenizers.clone(), in build_index()
161 tokenizers: Vec<TokenizerFn>, in add_section_to_index()
178 tokenizers.clone(), in add_section_to_index()
[all …]
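In the zola matches, build_tokenizers assembles a Vec<TokenizerFn> by pushing a text or path tokenizer for each field the search config indexes, and build_index then threads that vector through. A rough Python rendering of the same selection step; the config keys are assumptions for illustration, not zola's actual option names:

```python
def text_tokenizer(s):
    return s.lower().split()

def path_tokenizer(s):
    return [part for part in s.split("/") if part]

def build_tokenizers(search_config):
    # one tokenizer function per indexed field, in field order
    tokenizers = []
    if search_config.get("include_title"):
        tokenizers.append(text_tokenizer)
    if search_config.get("include_description"):
        tokenizers.append(text_tokenizer)
    if search_config.get("include_path"):
        tokenizers.append(path_tokenizer)
    if search_config.get("include_content"):
        tokenizers.append(text_tokenizer)
    return tokenizers
```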
/dports/www/gitea/gitea-1.16.5/vendor/github.com/blevesearch/bleve/v2/registry/
tokenizer.go
24 _, exists := tokenizers[name]
28 tokenizers[name] = constructor
45 cons, registered := tokenizers[name]
80 for name, cons := range tokenizers {
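The bleve registry matches here (and in the three vendored copies that follow) sketch a conventional name-to-constructor map: registration refuses duplicate names, lookup by name fails loudly for unregistered ones, and the map can be iterated to enumerate what is available. A generic Python equivalent, not bleve's actual API:

```python
tokenizers = {}

def register_tokenizer(name, constructor):
    if name in tokenizers:
        raise ValueError(f"tokenizer {name!r} already registered")
    tokenizers[name] = constructor

def tokenizer_by_name(name, config):
    constructor = tokenizers.get(name)
    if constructor is None:
        raise KeyError(f"no tokenizer registered with name {name!r}")
    return constructor(config)
```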
/dports/security/keybase/client-v5.7.1/go/vendor/github.com/blevesearch/bleve/registry/
tokenizer.go
24 _, exists := tokenizers[name]
28 tokenizers[name] = constructor
45 cons, registered := tokenizers[name]
80 for name, cons := range tokenizers {
/dports/www/mattermost-server/mattermost-server-6.0.2/vendor/github.com/blevesearch/bleve/registry/
tokenizer.go
24 _, exists := tokenizers[name]
28 tokenizers[name] = constructor
45 cons, registered := tokenizers[name]
80 for name, cons := range tokenizers {
/dports/audio/pms-devel/pms-c94e3c6/vendor/github.com/blevesearch/bleve/registry/
tokenizer.go
24 _, exists := tokenizers[name]
28 tokenizers[name] = constructor
45 cons, registered := tokenizers[name]
80 for name, cons := range tokenizers {
/dports/textproc/py-whoosh/Whoosh-2.7.4/src/whoosh/analysis/
analyzers.py
29 from whoosh.analysis.tokenizers import Tokenizer
34 from whoosh.analysis.tokenizers import default_pattern
35 from whoosh.analysis.tokenizers import CommaSeparatedTokenizer
36 from whoosh.analysis.tokenizers import IDTokenizer
37 from whoosh.analysis.tokenizers import RegexTokenizer
38 from whoosh.analysis.tokenizers import SpaceSeparatedTokenizer
/dports/deskutils/calibre/calibre-src-5.34.0/src/tinycss/tests/
tokenizing.py
12 tokenizers = (python_tokenize_flat,) variable
14 tokenizers = (python_tokenize_flat, c_tokenize_flat)
27 for tokenize in tokenizers:
254 for tokenize in tokenizers:
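The tinycss test matches show an optional-speedup arrangement: the tuple of tokenizers always contains the pure-Python implementation and gains the C-accelerated one only when the extension compiled, and every test then loops over the tuple so both implementations stay in agreement. Schematically, under assumed names:

```python
def python_tokenize_flat(css):
    # stand-in for the real pure-Python tokenizer
    return css.split()

try:
    from _speedups import c_tokenize_flat  # hypothetical C extension module
    tokenizers = (python_tokenize_flat, c_tokenize_flat)
except ImportError:
    tokenizers = (python_tokenize_flat,)

for tokenize in tokenizers:
    tokens = tokenize("color: red")
    # ...run identical assertions against `tokens` for each implementation
```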
/dports/www/ilias/ILIAS-5.4.25/libs/bower/bower_components/typeahead.js/doc/migration/
0.10.0.md
80 return Bloodhound.tokenizers.whitespace(d.num);
82 queryTokenizer: Bloodhound.tokenizers.whitespace,
109 return Bloodhound.tokenizers.whitespace(d.num);
111 queryTokenizer: Bloodhound.tokenizers.whitespace,
138 Bloodhound.tokenizers.whitespace(' one two twenty-five');
141 Bloodhound.tokenizers.nonword(' one two twenty-five');
160 var nameTokens = Bloodhound.tokenizers.whitespace(datum.name);
161 var ownerTokens = Bloodhound.tokenizers.whitespace(datum.owner);
162 var languageTokens = Bloodhound.tokenizers.whitespace(datum.language);
200 return Bloodhound.tokenizers.whitespace(d.value);
[all …]
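Both typeahead.js result sets revolve around Bloodhound's two stock tokenizers: whitespace splits on runs of spaces, while nonword also breaks on punctuation, so 'twenty-five' becomes two tokens. Equivalent behavior sketched in Python; the regexes are assumptions about the semantics, not Bloodhound's source:

```python
import re

def whitespace_tokenizer(s):
    # split on runs of whitespace, dropping empty leading/trailing tokens
    return [t for t in re.split(r"\s+", s) if t]

def nonword_tokenizer(s):
    # additionally split on any non-word character, e.g. the hyphen
    return [t for t in re.split(r"\W+", s) if t]

whitespace_tokenizer("  one two twenty-five")  # ['one', 'two', 'twenty-five']
nonword_tokenizer("  one two twenty-five")     # ['one', 'two', 'twenty', 'five']
```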
/dports/www/ilias6/ILIAS-6.14/libs/bower/bower_components/typeahead.js/doc/migration/
0.10.0.md
80 return Bloodhound.tokenizers.whitespace(d.num);
82 queryTokenizer: Bloodhound.tokenizers.whitespace,
109 return Bloodhound.tokenizers.whitespace(d.num);
111 queryTokenizer: Bloodhound.tokenizers.whitespace,
138 Bloodhound.tokenizers.whitespace(' one two twenty-five');
141 Bloodhound.tokenizers.nonword(' one two twenty-five');
160 var nameTokens = Bloodhound.tokenizers.whitespace(datum.name);
161 var ownerTokens = Bloodhound.tokenizers.whitespace(datum.owner);
162 var languageTokens = Bloodhound.tokenizers.whitespace(datum.language);
200 return Bloodhound.tokenizers.whitespace(d.value);
[all …]
/dports/www/chromium-legacy/chromium-88.0.4324.182/third_party/devtools-frontend/src/node_modules/remark-parse/
readme.md
205 var tokenizers = Parser.prototype.inlineTokenizers
209 tokenizers.mention = tokenizeMention
227 Specifies the order in which tokenizers run.
260 Specifies the order in which tokenizers run.
284 There are two types of tokenizers: block level and inline level.
285 Both are functions, and work the same, but inline tokenizers must have a
332 * `locator` ([`Function`][locator]) — Required for inline tokenizers
348 Locators are required for inline tokenizers.
443 Preferably, use the [`remark-disable-tokenizers`][remark-disable-tokenizers]
444 plugin to turn off tokenizers.
[all …]
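The remark-parse readme matches distinguish block-level from inline tokenizers and note that inline ones must carry a locator: a function that returns the index of the next possible match so the parser can skip ahead instead of probing every character. The shape, schematically, in Python (remark itself is JavaScript; the mention example follows the readme's own tokenizeMention):

```python
import re

MENTION = re.compile(r"@(\w+)")

def mention_locator(text, from_index):
    # tell the parser where the next candidate starts, or -1 for none
    return text.find("@", from_index)

def tokenize_mention(text):
    # invoked only at positions the locator reported; confirm the match
    # and report how much input it consumed
    m = MENTION.match(text)
    if m is None:
        return None
    return {"type": "mention", "value": m.group(1)}, m.end()
```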
/dports/textproc/kibana7/kibana-7.16.2-darwin-x86_64/node_modules/remark-parse/
readme.md
209 var tokenizers = Parser.prototype.inlineTokenizers
213 tokenizers.mention = tokenizeMention
231 Specifies the order in which tokenizers run.
264 Specifies the order in which tokenizers run.
288 There are two types of tokenizers: block level and inline level.
289 Both are functions, and work the same, but inline tokenizers must have a
336 * `locator` ([`Function`][locator]) — Required for inline tokenizers
352 Locators are required for inline tokenizers.
447 Preferably, use the [`remark-disable-tokenizers`][remark-disable-tokenizers]
448 plugin to turn off tokenizers.
[all …]
/dports/textproc/opensearch-dashboards/opensearch-dashboards-1.2.0-linux-x64/node_modules/remark-parse/
readme.md
227 var tokenizers = Parser.prototype.inlineTokenizers
231 tokenizers.mention = tokenizeMention
249 Specifies the order in which tokenizers run.
283 Specifies the order in which tokenizers run.
306 There are two types of tokenizers: block level and inline level.
307 Both are functions, and work the same, but inline tokenizers must have a
354 * `locator` ([`Function`][locator]) — Required for inline tokenizers
371 Locators are required for inline tokenizers.
466 Preferably, use the [`remark-disable-tokenizers`][remark-disable-tokenizers]
467 plugin to turn off tokenizers.
[all …]
/dports/mail/dovecot/dovecot-2.3.17/src/plugins/fts/
fts-user.c
160 const char *tokenizers_key, *const *tokenizers, *tokenizer_set_name; in fts_user_create_tokenizer() local
176 tokenizers = t_strsplit_spaces(str, " "); in fts_user_create_tokenizer()
178 for (i = 0; tokenizers[i] != NULL; i++) { in fts_user_create_tokenizer()
179 tokenizer_class = fts_tokenizer_find(tokenizers[i]); in fts_user_create_tokenizer()
182 tokenizers_key, tokenizers[i]); in fts_user_create_tokenizer()
187 tokenizer_set_name = t_str_replace(tokenizers[i], '-', '_'); in fts_user_create_tokenizer()
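The dovecot fts matches read a space-separated list of tokenizer names from a setting, resolve each name via fts_tokenizer_find, fail with the offending setting key in the error, and normalize '-' to '_' when deriving each tokenizer's own settings name. A loose Python sketch with stand-in registrations:

```python
REGISTERED = {"generic": object(), "email-address": object()}  # stand-ins

def create_tokenizer_chain(setting_key, setting_value):
    chain = []
    for name in setting_value.split():
        tokenizer_class = REGISTERED.get(name)
        if tokenizer_class is None:
            raise ValueError(f"{setting_key}: unknown tokenizer {name!r}")
        set_name = name.replace("-", "_")  # e.g. used as a settings-key prefix
        chain.append((tokenizer_class, set_name))
    return chain
```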
/dports/textproc/kibana7/kibana-7.16.2-darwin-x86_64/node_modules/react-markdown/node_modules/remark-parse/
readme.md
158 to the `processor`. Other plugins can add tokenizers to the parser’s prototype
168 var tokenizers = Parser.prototype.inlineTokenizers;
172 tokenizers.mention = tokenizeMention;
182 tokenizers (for example: `fencedCode`, `table`, and `paragraph`) eat
213 An object mapping tokenizer names to [tokenizer][]s. These tokenizers
265 The parser knows two types of tokenizers: block level and inline level.
266 Block level tokenizers are the same as inline level tokenizers, with
272 “eating”. Locators enable tokenizers to function faster by providing
289 — Required for inline tokenizers
314 performant. Locators enable inline tokenizers to function faster by
/dports/textproc/opensearch-dashboards/opensearch-dashboards-1.2.0-linux-x64/plugins/observabilityDashboards/node_modules/remark-parse/
H A Dreadme.md158 to the `processor`. Other plugins can add tokenizers to the parser’s prototype
168 var tokenizers = Parser.prototype.inlineTokenizers;
172 tokenizers.mention = tokenizeMention;
182 tokenizers (for example: `fencedCode`, `table`, and `paragraph`) eat
213 An object mapping tokenizer names to [tokenizer][]s. These tokenizers
265 The parser knows two types of tokenizers: block level and inline level.
266 Block level tokenizers are the same as inline level tokenizers, with
272 “eating”. Locators enable tokenizers to function faster by providing
289 — Required for inline tokenizers
314 performant. Locators enable inline tokenizers to function faster by
/dports/textproc/opensearch-dashboards/opensearch-dashboards-1.2.0-linux-x64/plugins/reportsDashboards/node_modules/remark-parse/
H A Dreadme.md158 to the `processor`. Other plugins can add tokenizers to the parser’s prototype
168 var tokenizers = Parser.prototype.inlineTokenizers;
172 tokenizers.mention = tokenizeMention;
182 tokenizers (for example: `fencedCode`, `table`, and `paragraph`) eat
213 An object mapping tokenizer names to [tokenizer][]s. These tokenizers
265 The parser knows two types of tokenizers: block level and inline level.
266 Block level tokenizers are the same as inline level tokenizers, with
272 “eating”. Locators enable tokenizers to function faster by providing
289 — Required for inline tokenizers
314 performant. Locators enable inline tokenizers to function faster by
/dports/textproc/py-Chameleon/Chameleon-3.6/src/chameleon/
program.py
19 tokenizers = { variable in ElementProgram
28 tokenizer = self.tokenizers[mode]
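The chameleon matches keep the tokenizer choice as a small class-level dispatch table keyed by mode. A schematic version; the regexes are placeholders, not chameleon's:

```python
import re

class ElementProgram:
    # mode string -> tokenizer, stored once on the class
    tokenizers = {
        "xml": re.compile(r"<[^>]+>|[^<]+"),
        "text": re.compile(r".+", re.DOTALL),
    }

    def __init__(self, source, mode="xml"):
        tokenizer = self.tokenizers[mode]
        self.tokens = tokenizer.findall(source)
```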
/dports/editors/libreoffice/libreoffice-7.2.6.2/writerfilter/
README.md
15 * `dmapper`: the domain mapper, hiding UNO from the tokenizers, used by DOCX and RTF import
20 the dmapper + one of the tokenizers
/dports/editors/libreoffice6/libreoffice-6.4.7.2/writerfilter/
README
13 * dmapper: the domain mapper, hiding UNO from the tokenizers, used by DOCX and RTF import
18 the dmapper + one of the tokenizers
/dports/www/ilias/ILIAS-5.4.25/libs/bower/bower_components/typeahead.js/doc/
bloodhound.md
48 queryTokenizer: Bloodhound.tokenizers.whitespace,
49 datumTokenizer: Bloodhound.tokenizers.whitespace
77 queryTokenizer: Bloodhound.tokenizers.whitespace,
78 datumTokenizer: Bloodhound.tokenizers.whitespace
124 queryTokenizer: Bloodhound.tokenizers.whitespace,
125 datumTokenizer: Bloodhound.tokenizers.whitespace
