// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>

#include "arrow/testing/gtest_compat.h"

#include "parquet/column_reader.h"
#include "parquet/column_writer.h"
#include "parquet/file_reader.h"
#include "parquet/file_writer.h"
#include "parquet/platform.h"
#include "parquet/test_util.h"
#include "parquet/types.h"
33 using schema::GroupNode
;
34 using schema::NodePtr
;
35 using schema::PrimitiveNode
;
36 using ::testing::ElementsAre
;
40 template <typename TestType
>
41 class TestSerialize
: public PrimitiveTypedTest
<TestType
> {
46 rows_per_rowgroup_
= 50;
48 this->SetUpSchema(Repetition::OPTIONAL
, num_columns_
);
54 int rows_per_rowgroup_
;
57 void FileSerializeTest(Compression::type codec_type
) {
58 FileSerializeTest(codec_type
, codec_type
);
61 void FileSerializeTest(Compression::type codec_type
,
62 Compression::type expected_codec_type
) {
63 auto sink
= CreateOutputStream();
64 auto gnode
= std::static_pointer_cast
<GroupNode
>(this->node_
);
66 WriterProperties::Builder prop_builder
;
68 for (int i
= 0; i
< num_columns_
; ++i
) {
69 prop_builder
.compression(this->schema_
.Column(i
)->name(), codec_type
);
71 std::shared_ptr
<WriterProperties
> writer_properties
= prop_builder
.build();
73 auto file_writer
= ParquetFileWriter::Open(sink
, gnode
, writer_properties
);
74 this->GenerateData(rows_per_rowgroup_
);
75 for (int rg
= 0; rg
< num_rowgroups_
/ 2; ++rg
) {
76 RowGroupWriter
* row_group_writer
;
77 row_group_writer
= file_writer
->AppendRowGroup();
78 for (int col
= 0; col
< num_columns_
; ++col
) {
80 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->NextColumn());
81 column_writer
->WriteBatch(rows_per_rowgroup_
, this->def_levels_
.data(), nullptr,
83 column_writer
->Close();
84 // Ensure column() API which is specific to BufferedRowGroup cannot be called
85 ASSERT_THROW(row_group_writer
->column(col
), ParquetException
);
88 row_group_writer
->Close();
90 // Write half BufferedRowGroups
91 for (int rg
= 0; rg
< num_rowgroups_
/ 2; ++rg
) {
92 RowGroupWriter
* row_group_writer
;
93 row_group_writer
= file_writer
->AppendBufferedRowGroup();
94 for (int batch
= 0; batch
< (rows_per_rowgroup_
/ rows_per_batch_
); ++batch
) {
95 for (int col
= 0; col
< num_columns_
; ++col
) {
97 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->column(col
));
98 column_writer
->WriteBatch(
99 rows_per_batch_
, this->def_levels_
.data() + (batch
* rows_per_batch_
),
100 nullptr, this->values_ptr_
+ (batch
* rows_per_batch_
));
101 // Ensure NextColumn() API which is specific to RowGroup cannot be called
102 ASSERT_THROW(row_group_writer
->NextColumn(), ParquetException
);
105 for (int col
= 0; col
< num_columns_
; ++col
) {
107 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->column(col
));
108 column_writer
->Close();
110 row_group_writer
->Close();
112 file_writer
->Close();
114 PARQUET_ASSIGN_OR_THROW(auto buffer
, sink
->Finish());
116 int num_rows_
= num_rowgroups_
* rows_per_rowgroup_
;
118 auto source
= std::make_shared
<::arrow::io::BufferReader
>(buffer
);
119 auto file_reader
= ParquetFileReader::Open(source
);
120 ASSERT_EQ(num_columns_
, file_reader
->metadata()->num_columns());
121 ASSERT_EQ(num_rowgroups_
, file_reader
->metadata()->num_row_groups());
122 ASSERT_EQ(num_rows_
, file_reader
->metadata()->num_rows());
124 for (int rg
= 0; rg
< num_rowgroups_
; ++rg
) {
125 auto rg_reader
= file_reader
->RowGroup(rg
);
126 auto rg_metadata
= rg_reader
->metadata();
127 ASSERT_EQ(num_columns_
, rg_metadata
->num_columns());
128 ASSERT_EQ(rows_per_rowgroup_
, rg_metadata
->num_rows());
129 // Check that the specified compression was actually used.
130 ASSERT_EQ(expected_codec_type
, rg_metadata
->ColumnChunk(0)->compression());
132 const int64_t total_byte_size
= rg_metadata
->total_byte_size();
133 const int64_t total_compressed_size
= rg_metadata
->total_compressed_size();
134 if (expected_codec_type
== Compression::UNCOMPRESSED
) {
135 ASSERT_EQ(total_byte_size
, total_compressed_size
);
137 ASSERT_NE(total_byte_size
, total_compressed_size
);
140 int64_t total_column_byte_size
= 0;
141 int64_t total_column_compressed_size
= 0;
143 for (int i
= 0; i
< num_columns_
; ++i
) {
145 ASSERT_FALSE(rg_metadata
->ColumnChunk(i
)->has_index_page());
146 total_column_byte_size
+= rg_metadata
->ColumnChunk(i
)->total_uncompressed_size();
147 total_column_compressed_size
+=
148 rg_metadata
->ColumnChunk(i
)->total_compressed_size();
150 std::vector
<int16_t> def_levels_out(rows_per_rowgroup_
);
151 std::vector
<int16_t> rep_levels_out(rows_per_rowgroup_
);
153 std::static_pointer_cast
<TypedColumnReader
<TestType
>>(rg_reader
->Column(i
));
154 this->SetupValuesOut(rows_per_rowgroup_
);
155 col_reader
->ReadBatch(rows_per_rowgroup_
, def_levels_out
.data(),
156 rep_levels_out
.data(), this->values_out_ptr_
, &values_read
);
157 this->SyncValuesOut();
158 ASSERT_EQ(rows_per_rowgroup_
, values_read
);
159 ASSERT_EQ(this->values_
, this->values_out_
);
160 ASSERT_EQ(this->def_levels_
, def_levels_out
);
163 ASSERT_EQ(total_byte_size
, total_column_byte_size
);
164 ASSERT_EQ(total_compressed_size
, total_column_compressed_size
);
168 void UnequalNumRows(int64_t max_rows
, const std::vector
<int64_t> rows_per_column
) {
169 auto sink
= CreateOutputStream();
170 auto gnode
= std::static_pointer_cast
<GroupNode
>(this->node_
);
172 std::shared_ptr
<WriterProperties
> props
= WriterProperties::Builder().build();
174 auto file_writer
= ParquetFileWriter::Open(sink
, gnode
, props
);
176 RowGroupWriter
* row_group_writer
;
177 row_group_writer
= file_writer
->AppendRowGroup();
179 this->GenerateData(max_rows
);
180 for (int col
= 0; col
< num_columns_
; ++col
) {
182 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->NextColumn());
183 column_writer
->WriteBatch(rows_per_column
[col
], this->def_levels_
.data(), nullptr,
185 column_writer
->Close();
187 row_group_writer
->Close();
188 file_writer
->Close();
191 void UnequalNumRowsBuffered(int64_t max_rows
,
192 const std::vector
<int64_t> rows_per_column
) {
193 auto sink
= CreateOutputStream();
194 auto gnode
= std::static_pointer_cast
<GroupNode
>(this->node_
);
196 std::shared_ptr
<WriterProperties
> props
= WriterProperties::Builder().build();
198 auto file_writer
= ParquetFileWriter::Open(sink
, gnode
, props
);
200 RowGroupWriter
* row_group_writer
;
201 row_group_writer
= file_writer
->AppendBufferedRowGroup();
203 this->GenerateData(max_rows
);
204 for (int col
= 0; col
< num_columns_
; ++col
) {
206 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->column(col
));
207 column_writer
->WriteBatch(rows_per_column
[col
], this->def_levels_
.data(), nullptr,
209 column_writer
->Close();
211 row_group_writer
->Close();
212 file_writer
->Close();
215 void RepeatedUnequalRows() {
216 // Optional and repeated, so definition and repetition levels
217 this->SetUpSchema(Repetition::REPEATED
);
219 const int kNumRows
= 100;
220 this->GenerateData(kNumRows
);
222 auto sink
= CreateOutputStream();
223 auto gnode
= std::static_pointer_cast
<GroupNode
>(this->node_
);
224 std::shared_ptr
<WriterProperties
> props
= WriterProperties::Builder().build();
225 auto file_writer
= ParquetFileWriter::Open(sink
, gnode
, props
);
227 RowGroupWriter
* row_group_writer
;
228 row_group_writer
= file_writer
->AppendRowGroup();
230 this->GenerateData(kNumRows
);
232 std::vector
<int16_t> definition_levels(kNumRows
, 1);
233 std::vector
<int16_t> repetition_levels(kNumRows
, 0);
237 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->NextColumn());
238 column_writer
->WriteBatch(kNumRows
, definition_levels
.data(),
239 repetition_levels
.data(), this->values_ptr_
);
240 column_writer
->Close();
243 definition_levels
[1] = 0;
244 repetition_levels
[3] = 1;
248 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->NextColumn());
249 column_writer
->WriteBatch(kNumRows
, definition_levels
.data(),
250 repetition_levels
.data(), this->values_ptr_
);
251 column_writer
->Close();
255 void ZeroRowsRowGroup() {
256 auto sink
= CreateOutputStream();
257 auto gnode
= std::static_pointer_cast
<GroupNode
>(this->node_
);
259 std::shared_ptr
<WriterProperties
> props
= WriterProperties::Builder().build();
261 auto file_writer
= ParquetFileWriter::Open(sink
, gnode
, props
);
263 RowGroupWriter
* row_group_writer
;
265 row_group_writer
= file_writer
->AppendRowGroup();
266 for (int col
= 0; col
< num_columns_
; ++col
) {
268 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->NextColumn());
269 column_writer
->Close();
271 row_group_writer
->Close();
273 row_group_writer
= file_writer
->AppendBufferedRowGroup();
274 for (int col
= 0; col
< num_columns_
; ++col
) {
276 static_cast<TypedColumnWriter
<TestType
>*>(row_group_writer
->column(col
));
277 column_writer
->Close();
279 row_group_writer
->Close();
281 file_writer
->Close();
285 typedef ::testing::Types
<Int32Type
, Int64Type
, Int96Type
, FloatType
, DoubleType
,
286 BooleanType
, ByteArrayType
, FLBAType
>
289 TYPED_TEST_SUITE(TestSerialize
, TestTypes
);
291 TYPED_TEST(TestSerialize
, SmallFileUncompressed
) {
292 ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::UNCOMPRESSED
));
295 TYPED_TEST(TestSerialize
, TooFewRows
) {
296 std::vector
<int64_t> num_rows
= {100, 100, 100, 99};
297 ASSERT_THROW(this->UnequalNumRows(100, num_rows
), ParquetException
);
298 ASSERT_THROW(this->UnequalNumRowsBuffered(100, num_rows
), ParquetException
);
301 TYPED_TEST(TestSerialize
, TooManyRows
) {
302 std::vector
<int64_t> num_rows
= {100, 100, 100, 101};
303 ASSERT_THROW(this->UnequalNumRows(101, num_rows
), ParquetException
);
304 ASSERT_THROW(this->UnequalNumRowsBuffered(101, num_rows
), ParquetException
);
307 TYPED_TEST(TestSerialize
, ZeroRows
) { ASSERT_NO_THROW(this->ZeroRowsRowGroup()); }
309 TYPED_TEST(TestSerialize
, RepeatedTooFewRows
) {
310 ASSERT_THROW(this->RepeatedUnequalRows(), ParquetException
);
#ifdef ARROW_WITH_SNAPPY
TYPED_TEST(TestSerialize, SmallFileSnappy) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::SNAPPY));
}
#endif
#ifdef ARROW_WITH_BROTLI
TYPED_TEST(TestSerialize, SmallFileBrotli) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::BROTLI));
}
#endif
#ifdef ARROW_WITH_GZIP
TYPED_TEST(TestSerialize, SmallFileGzip) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::GZIP));
}
#endif
#ifdef ARROW_WITH_LZ4
TYPED_TEST(TestSerialize, SmallFileLz4) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::LZ4));
}

TYPED_TEST(TestSerialize, SmallFileLz4Hadoop) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::LZ4_HADOOP));
}
#endif
#ifdef ARROW_WITH_ZSTD
TYPED_TEST(TestSerialize, SmallFileZstd) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::ZSTD));
}
#endif
347 TEST(TestBufferedRowGroupWriter
, DisabledDictionary
) {
349 // Wrong dictionary_page_offset when writing only data pages via BufferedPageWriter
350 auto sink
= CreateOutputStream();
351 auto writer_props
= parquet::WriterProperties::Builder().disable_dictionary()->build();
352 schema::NodeVector fields
;
354 PrimitiveNode::Make("col", parquet::Repetition::REQUIRED
, parquet::Type::INT32
));
355 auto schema
= std::static_pointer_cast
<GroupNode
>(
356 GroupNode::Make("schema", Repetition::REQUIRED
, fields
));
357 auto file_writer
= parquet::ParquetFileWriter::Open(sink
, schema
, writer_props
);
358 auto rg_writer
= file_writer
->AppendBufferedRowGroup();
359 auto col_writer
= static_cast<Int32Writer
*>(rg_writer
->column(0));
361 col_writer
->WriteBatch(1, nullptr, nullptr, &value
);
363 file_writer
->Close();
364 PARQUET_ASSIGN_OR_THROW(auto buffer
, sink
->Finish());
366 auto source
= std::make_shared
<::arrow::io::BufferReader
>(buffer
);
367 auto file_reader
= ParquetFileReader::Open(source
);
368 ASSERT_EQ(1, file_reader
->metadata()->num_row_groups());
369 auto rg_reader
= file_reader
->RowGroup(0);
370 ASSERT_EQ(1, rg_reader
->metadata()->num_columns());
371 ASSERT_EQ(1, rg_reader
->metadata()->num_rows());
372 ASSERT_FALSE(rg_reader
->metadata()->ColumnChunk(0)->has_dictionary_page());
375 TEST(TestBufferedRowGroupWriter
, MultiPageDisabledDictionary
) {
376 constexpr int kValueCount
= 10000;
377 constexpr int kPageSize
= 16384;
378 auto sink
= CreateOutputStream();
379 auto writer_props
= parquet::WriterProperties::Builder()
380 .disable_dictionary()
381 ->data_pagesize(kPageSize
)
383 schema::NodeVector fields
;
385 PrimitiveNode::Make("col", parquet::Repetition::REQUIRED
, parquet::Type::INT32
));
386 auto schema
= std::static_pointer_cast
<GroupNode
>(
387 GroupNode::Make("schema", Repetition::REQUIRED
, fields
));
388 auto file_writer
= parquet::ParquetFileWriter::Open(sink
, schema
, writer_props
);
389 auto rg_writer
= file_writer
->AppendBufferedRowGroup();
390 auto col_writer
= static_cast<Int32Writer
*>(rg_writer
->column(0));
391 std::vector
<int32_t> values_in
;
392 for (int i
= 0; i
< kValueCount
; ++i
) {
393 values_in
.push_back((i
% 100) + 1);
395 col_writer
->WriteBatch(kValueCount
, nullptr, nullptr, values_in
.data());
397 file_writer
->Close();
398 PARQUET_ASSIGN_OR_THROW(auto buffer
, sink
->Finish());
400 auto source
= std::make_shared
<::arrow::io::BufferReader
>(buffer
);
401 auto file_reader
= ParquetFileReader::Open(source
);
402 auto file_metadata
= file_reader
->metadata();
403 ASSERT_EQ(1, file_reader
->metadata()->num_row_groups());
404 std::vector
<int32_t> values_out(kValueCount
);
405 for (int r
= 0; r
< file_metadata
->num_row_groups(); ++r
) {
406 auto rg_reader
= file_reader
->RowGroup(r
);
407 ASSERT_EQ(1, rg_reader
->metadata()->num_columns());
408 ASSERT_EQ(kValueCount
, rg_reader
->metadata()->num_rows());
409 int64_t total_values_read
= 0;
410 std::shared_ptr
<parquet::ColumnReader
> col_reader
;
411 ASSERT_NO_THROW(col_reader
= rg_reader
->Column(0));
412 parquet::Int32Reader
* int32_reader
=
413 static_cast<parquet::Int32Reader
*>(col_reader
.get());
414 int64_t vn
= kValueCount
;
415 int32_t* vx
= values_out
.data();
416 while (int32_reader
->HasNext()) {
418 int32_reader
->ReadBatch(vn
, nullptr, nullptr, vx
, &values_read
);
421 total_values_read
+= values_read
;
423 ASSERT_EQ(kValueCount
, total_values_read
);
424 ASSERT_EQ(values_in
, values_out
);
428 TEST(ParquetRoundtrip
, AllNulls
) {
429 auto primitive_node
=
430 PrimitiveNode::Make("nulls", Repetition::OPTIONAL
, nullptr, Type::INT32
);
431 schema::NodeVector
columns({primitive_node
});
433 auto root_node
= GroupNode::Make("root", Repetition::REQUIRED
, columns
, nullptr);
435 auto sink
= CreateOutputStream();
438 ParquetFileWriter::Open(sink
, std::static_pointer_cast
<GroupNode
>(root_node
));
439 auto row_group_writer
= file_writer
->AppendRowGroup();
440 auto column_writer
= static_cast<Int32Writer
*>(row_group_writer
->NextColumn());
443 int16_t def_levels
[] = {0, 0, 0};
445 column_writer
->WriteBatch(3, def_levels
, nullptr, values
);
447 column_writer
->Close();
448 row_group_writer
->Close();
449 file_writer
->Close();
451 ReaderProperties props
= default_reader_properties();
452 props
.enable_buffered_stream();
453 PARQUET_ASSIGN_OR_THROW(auto buffer
, sink
->Finish());
455 auto source
= std::make_shared
<::arrow::io::BufferReader
>(buffer
);
456 auto file_reader
= ParquetFileReader::Open(source
, props
);
457 auto row_group_reader
= file_reader
->RowGroup(0);
458 auto column_reader
= std::static_pointer_cast
<Int32Reader
>(row_group_reader
->Column(0));
464 column_reader
->ReadBatch(3, def_levels
, nullptr, values
, &values_read
);
465 EXPECT_THAT(def_levels
, ElementsAre(0, 0, 0));
}  // namespace parquet