public class AppendingParquetOutputFormat
extends parquet.hadoop.ParquetOutputFormat<org.apache.spark.sql.catalyst.expressions.Row>
| Constructor and Description |
|---|
| `AppendingParquetOutputFormat(int offset)` |
| Modifier and Type | Method and Description |
|---|---|
| `void` | `checkOutputSpecs(org.apache.hadoop.mapreduce.JobContext job)` |
| `org.apache.hadoop.fs.Path` | `getDefaultWorkFile(org.apache.hadoop.mapreduce.TaskAttemptContext context, String extension)` |
Methods inherited from class parquet.hadoop.ParquetOutputFormat: getBlockSize, getBlockSize, getCompression, getCompression, getDictionaryPageSize, getDictionaryPageSize, getEnableDictionary, getEnableDictionary, getOutputCommitter, getPageSize, getPageSize, getRecordWriter, getRecordWriter, getRecordWriter, getValidation, getValidation, getWriterVersion, getWriteSupport, getWriteSupportClass, isCompressionSet, isCompressionSet, setBlockSize, setCompression, setDictionaryPageSize, setEnableDictionary, setPageSize, setValidation, setValidation, setWriteSupportClass, setWriteSupportClass
public void checkOutputSpecs(org.apache.hadoop.mapreduce.JobContext job)

Overrides: checkOutputSpecs in class org.apache.hadoop.mapreduce.lib.output.FileOutputFormat<Void,org.apache.spark.sql.catalyst.expressions.Row>
public org.apache.hadoop.fs.Path getDefaultWorkFile(org.apache.hadoop.mapreduce.TaskAttemptContext context, String extension)

Overrides: getDefaultWorkFile in class org.apache.hadoop.mapreduce.lib.output.FileOutputFormat<Void,org.apache.spark.sql.catalyst.expressions.Row>