/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.hive.client

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.conf.HiveConf

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Literal}
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.types.IntegerType

/**
 * Tests partition-filter behavior of the Hive metastore client when the
 * metastore's direct-SQL filter pushdown is disabled.
 */
class HiveClientSuite extends SparkFunSuite {
  private val clientBuilder = new HiveClientBuilder

  // Metastore config key controlling direct-SQL evaluation of partition filters.
  private val tryDirectSqlKey = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL.varname

  test(s"getPartitionsByFilter returns all partitions when $tryDirectSqlKey=false") {
    // With direct SQL disabled the metastore cannot evaluate the predicate, so
    // the client is expected to fall back to fetching every partition.
    val partitionCount = 5

    // Build a client against a metastore with direct-SQL pushdown turned off.
    val conf = new Configuration()
    conf.setBoolean(tryDirectSqlKey, false)
    val client = clientBuilder.buildClient(HiveUtils.hiveExecutionVersion, conf)

    client.runSqlHive("CREATE TABLE test (value INT) PARTITIONED BY (part INT)")

    // Register partitions part=1 .. part=5, each with an empty storage descriptor.
    val emptyStorage = CatalogStorageFormat(
      locationUri = None,
      inputFormat = None,
      outputFormat = None,
      serde = None,
      compressed = false,
      properties = Map.empty)
    val newParts = (1 to partitionCount).map { n =>
      CatalogTablePartition(Map("part" -> n.toString), emptyStorage)
    }
    client.createPartitions(
      "default", "test", newParts, ignoreIfExists = false)

    // Request only part=3; the fallback path should nevertheless return all
    // partitions of the table rather than just the matching one.
    val returned = client.getPartitionsByFilter(
      client.getTable("default", "test"),
      Seq(EqualTo(AttributeReference("part", IntegerType)(), Literal(3))))

    assert(returned.size == partitionCount)
  }
}