Searched for refs:requiredColumns (results 1 – 14 of 14), sorted by relevance

/dports/devel/spark/spark-2.1.1/sql/core/src/main/scala/org/apache/spark/sql/sources/
interfaces.scala:261: def buildScan(requiredColumns: Array[String]): RDD[Row]
interfaces.scala:279: def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row]
interfaces.scala:317: def buildScan(requiredColumns: Seq[Attribute], filters: Seq[Expression]): RDD[Row]
(a hedged PrunedScan implementation sketch follows below)
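These are the Spark 2.1 scan interfaces a data source implements to receive column pruning: the PrunedScan form at line 261 and the PrunedFilteredScan form at line 279 (line 317 is the catalyst-internal variant). A minimal PrunedScan relation might look like the sketch below; the relation name, schema, and sample data are illustrative assumptions rather than code from this tree.

  import org.apache.spark.rdd.RDD
  import org.apache.spark.sql.{Row, SQLContext}
  import org.apache.spark.sql.sources.{BaseRelation, PrunedScan}
  import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

  // Hypothetical relation serving (key, value) pairs while honoring column pruning.
  class KeyValueRelation(override val sqlContext: SQLContext)
      extends BaseRelation with PrunedScan {

    override def schema: StructType = StructType(Seq(
      StructField("key", IntegerType),
      StructField("value", StringType)))

    private val data = Seq((1, "a"), (2, "b"))

    // Emit rows holding only the requested columns, in the requested order.
    // Spark only ever asks for columns present in `schema`, so the match is safe.
    override def buildScan(requiredColumns: Array[String]): RDD[Row] = {
      val extractors: Array[((Int, String)) => Any] = requiredColumns.map {
        case "key"   => (kv: (Int, String)) => kv._1
        case "value" => (kv: (Int, String)) => kv._2
      }
      sqlContext.sparkContext
        .parallelize(data)
        .map(kv => Row.fromSeq(extractors.map(f => f(kv))))
    }
  }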
/dports/www/chromium-legacy/chromium-88.0.4324.182/third_party/perfetto/ui/src/frontend/
query_table.ts:39: const requiredColumns = ['ts', 'dur', 'track_id'];  [constant]
query_table.ts:40: for (const col of requiredColumns) {
/dports/devel/spark/spark-2.1.1/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/
JDBCRelation.scala:120: override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
JDBCRelation.scala:125:   requiredColumns,
JDBCRDD.scala:166:   requiredColumns: Array[String],
JDBCRDD.scala:172: val quotedColumns = requiredColumns.map(colName => dialect.quoteIdentifier(colName))
JDBCRDD.scala:176: pruneSchema(schema, requiredColumns),
(a sketch of the schema-pruning step follows below)
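In the JDBC source, each requested column is quoted through the JDBC dialect and the relation's schema is pruned down to requiredColumns before the SELECT list is built. The pruning step is assumed to behave roughly like this sketch (a reconstruction, not the actual body of JDBCRDD.pruneSchema):

  import org.apache.spark.sql.types.{StructField, StructType}

  // Keep only the requested fields, in request order; unknown names throw,
  // mirroring the expectation that Spark requests only schema columns.
  def pruneSchema(schema: StructType, columns: Array[String]): StructType = {
    val byName: Map[String, StructField] = schema.fields.map(f => f.name -> f).toMap
    StructType(columns.map(byName))
  }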
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/sources/
PrunedScanSuite.scala:47: override def buildScan(requiredColumns: Array[String]): RDD[Row] = {
PrunedScanSuite.scala:48:   val rowBuilders = requiredColumns.map {
FilteredScanSuite.scala:73: override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
FilteredScanSuite.scala:74:   val rowBuilders = requiredColumns.map {
FilteredScanSuite.scala:83:   ColumnsRequired.set = requiredColumns.toSet
(a sketch of pushed-down filter evaluation follows below)
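The filtered-scan test relation receives pushed-down Filters alongside the column list. A source may honor only the filters it understands, because Spark re-evaluates every predicate against the returned rows anyway; below is a hedged sketch of such a best-effort evaluator, using an illustrative column-name-to-value Map as the row representation:

  import org.apache.spark.sql.sources._

  // Evaluate a pushed-down Filter against a column-name -> value map.
  // Unrecognized filters return true: Spark re-checks all predicates itself.
  def satisfies(filter: Filter, row: Map[String, Any]): Boolean = filter match {
    case EqualTo(attr, value) => row.get(attr).contains(value)
    case IsNull(attr)         => row.getOrElse(attr, null) == null
    case IsNotNull(attr)      => row.getOrElse(attr, null) != null
    case In(attr, values)     => row.get(attr).exists(values.contains)
    case And(left, right)     => satisfies(left, row) && satisfies(right, row)
    case Or(left, right)      => satisfies(left, row) || satisfies(right, row)
    case Not(child)           => !satisfies(child, row)
    case _                    => true
  }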
/dports/java/eclipse/eclipse.platform.releng.aggregator-R4_16/eclipse.platform.ui.tools/bundles/org.eclipse.e4.tools.emf.ui/src/org/eclipse/e4/tools/emf/ui/internal/common/component/tabs/
ListTab.java:157: LinkedHashMap<String, TableColumn> requiredColumns = new LinkedHashMap<>();  [field in ListTab]
ListTab.java:187: if (requiredColumns.containsValue(col)) {  [in saveSettings()]
ListTab.java:188:   id = getKey(requiredColumns, col);  [in saveSettings()]
ListTab.java:267: col = requiredColumns.get(colName);  [in loadSettings()]
ListTab.java:509: requiredColumns.put("GoTree", colGo.getColumn()); //$NON-NLS-1$  [in postConstruct()]
ListTab.java:524: requiredColumns.put("GoXmi", colGoXmi.getColumn()); //$NON-NLS-1$  [in postConstruct()]
ListTab.java:551: requiredColumns.put("Marked", colMarked.getColumn()); //$NON-NLS-1$  [in postConstruct()]
ListTab.java:573: requiredColumns.put("Item", colItem.getColumn()); //$NON-NLS-1$  [in postConstruct()]
ListTab.java:623: for (final TableColumn col : requiredColumns.values()) {  [in postConstruct()]
/dports/graphics/qgis/qgis-3.22.3/src/analysis/processing/
qgsbookmarkalgorithms.cpp:249: QSet< QString > requiredColumns = nameExpression.referencedColumns();  [local, in processAlgorithm()]
qgsbookmarkalgorithms.cpp:257: requiredColumns.unite( groupExpression->referencedColumns() );  [in processAlgorithm()]
qgsbookmarkalgorithms.cpp:261: req.setSubsetOfAttributes( requiredColumns, source->fields() );  [in processAlgorithm()]
/dports/graphics/qgis-ltr/qgis-3.16.16/src/analysis/processing/
qgsbookmarkalgorithms.cpp:248: QSet< QString > requiredColumns = nameExpression.referencedColumns();  [local, in processAlgorithm()]
qgsbookmarkalgorithms.cpp:256: requiredColumns.unite( groupExpression->referencedColumns() );  [in processAlgorithm()]
qgsbookmarkalgorithms.cpp:260: req.setSubsetOfAttributes( requiredColumns, source->fields() );  [in processAlgorithm()]
/dports/devel/spark/spark-2.1.1/sql/hive/src/test/scala/org/apache/spark/sql/sources/
SimpleTextRelation.scala:74: SimpleTextRelation.requiredColumns = requiredSchema.fieldNames
SimpleTextRelation.scala:156: var requiredColumns: Seq[String] = Nil  [variable]
/dports/devel/spark/spark-2.1.1/mllib/src/main/scala/org/apache/spark/ml/source/libsvm/
LibSVMRelation.scala:181: val requiredColumns = GenerateUnsafeProjection.generate(requiredOutput, fullOutput)  [constant]
LibSVMRelation.scala:185: requiredColumns(converter.toRow(Row(pt.label, features)))
/dports/devel/spark/spark-2.1.1/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/
CSVRelation.scala:74:   requiredColumns: Array[String],
CSVRelation.scala:77: val requiredFields = StructType(requiredColumns.map(schema(_))).fields
/dports/www/qt5-webengine/qtwebengine-everywhere-src-5.15.2/src/3rdparty/chromium/third_party/perfetto/ui/src/frontend/
viewer_page.ts:54: const requiredColumns = ['ts', 'dur', 'track_id'];  [constant]
viewer_page.ts:55: for (const col of requiredColumns) {
/dports/devel/spark/spark-2.1.1/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/
Analyzer.scala:1211:   requiredColumns: Int = 0)(
Analyzer.scala:1233: if (requiredColumns > 0 && requiredColumns != current.output.size) {
Analyzer.scala:1235:   s"does not match the required number of columns ($requiredColumns)")
(a sketch of this arity check follows below)