/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.spark.sql.comet.shims

import org.apache.hadoop.fs.Path

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression}
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.execution.{FileSourceScanExec, PartitionedFileUtil}
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.parquet.ParquetOptions
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType

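/**
 * Shim that gathers the Spark-version-specific APIs used by Comet's scan
 * implementation in one place, so the rest of the scan code can stay
 * version-agnostic.
 */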
trait ShimCometScanExec {
  def wrapped: FileSourceScanExec

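  // Constant (per-file) metadata columns, e.g. file path and size, exposed by
  // the wrapped Spark scan node.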
  lazy val fileConstantMetadataColumns: Seq[AttributeReference] =
    wrapped.fileConstantMetadataColumns

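  // Builds Spark's FileScanRDD, wiring in the constant metadata columns and the
  // file format's metadata extractors. Kept behind the shim because the
  // FileScanRDD constructor signature has changed across Spark releases.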
  protected def newFileScanRDD(
      fsRelation: HadoopFsRelation,
      readFunction: PartitionedFile => Iterator[InternalRow],
      filePartitions: Seq[FilePartition],
      readSchema: StructType,
      options: ParquetOptions): FileScanRDD = new FileScanRDD(
    fsRelation.sparkSession,
    readFunction,
    filePartitions,
    readSchema,
    fileConstantMetadataColumns,
    fsRelation.fileFormat.fileConstantMetadataExtractors,
    options)

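  // The sparkVersion parameter is unused on this Spark version; presumably it is
  // kept in the signature so callers stay source-compatible with shims for Spark
  // versions whose error API takes the version string.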
  protected def invalidBucketFile(path: String, sparkVersion: String): Throwable =
    QueryExecutionErrors.invalidBucketFile(path)

  // See SPARK-39634 (file splitting combined with row index generation): on this
  // Spark version no extra schema handling is required, so this is a constant false.
  protected def isNeededForSchema(sparkSchema: StructType): Boolean = false

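  // Thin pass-throughs to PartitionedFileUtil, whose helper signatures differ
  // across Spark releases (this variant takes a FileStatusWithMetadata).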
  protected def getPartitionedFile(
      f: FileStatusWithMetadata,
      p: PartitionDirectory): PartitionedFile =
    PartitionedFileUtil.getPartitionedFile(f, f.getPath, p.values)

  protected def splitFiles(
      sparkSession: SparkSession,
      file: FileStatusWithMetadata,
      filePath: Path,
      isSplitable: Boolean,
      maxSplitBytes: Long,
      partitionValues: InternalRow): Seq[PartitionedFile] =
    PartitionedFileUtil.splitFiles(
      sparkSession, file, filePath, isSplitable, maxSplitBytes, partitionValues)

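  // Translates Catalyst predicates into data source Filters, e.g. a Catalyst
  // GreaterThan(a, Literal(1)) becomes sources.GreaterThan("a", 1). Expressions
  // with no source-level equivalent yield None from translateFilter and are
  // dropped by the flatMap.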
  protected def getPushedDownFilters(
      relation: HadoopFsRelation,
      dataFilters: Seq[Expression]): Seq[Filter] = {
    val supportNestedPredicatePushdown = DataSourceUtils.supportNestedPredicatePushdown(relation)
    dataFilters.flatMap(DataSourceStrategy.translateFilter(_, supportNestedPredicatePushdown))
  }
}
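
// A minimal usage sketch, not part of the shim itself: `CometScanWrapperSketch`
// is a hypothetical class used only for illustration; real integrations mix
// ShimCometScanExec into an actual SparkPlan wrapping a FileSourceScanExec.
class CometScanWrapperSketch(val wrapped: FileSourceScanExec) extends ShimCometScanExec {
  // Translate the wrapped scan's predicates through the shimmed helper; relation
  // and dataFilters come straight off the Spark scan node.
  def translatedFilters: Seq[Filter] =
    getPushedDownFilters(wrapped.relation, wrapped.dataFilters)
}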