// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "arrow/testing/gtest_compat.h"

#include "parquet/column_reader.h"
#include "parquet/column_writer.h"
#include "parquet/file_reader.h"
#include "parquet/file_writer.h"
#include "parquet/platform.h"
#include "parquet/test_util.h"
#include "parquet/types.h"

namespace parquet {

using schema::GroupNode;
using schema::NodePtr;
using schema::PrimitiveNode;
using ::testing::ElementsAre;

namespace test {

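// Round-trip fixture, instantiated once per physical Parquet type: writes a
// file whose row groups are produced half with the row-wise AppendRowGroup()
// API and half with the buffered AppendBufferedRowGroup() API, then reads
// everything back and checks metadata, values and definition levels.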
template <typename TestType>
class TestSerialize : public PrimitiveTypedTest<TestType> {
 public:
  void SetUp() {
    num_columns_ = 4;
    num_rowgroups_ = 4;
    rows_per_rowgroup_ = 50;
    rows_per_batch_ = 10;
    this->SetUpSchema(Repetition::OPTIONAL, num_columns_);
  }

 protected:
  int num_columns_;
  int num_rowgroups_;
  int rows_per_rowgroup_;
  int rows_per_batch_;

  void FileSerializeTest(Compression::type codec_type) {
    FileSerializeTest(codec_type, codec_type);
  }

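  // The second argument lets callers assert the codec actually recorded in the
  // file metadata, which may differ from the codec requested at write time.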
  void FileSerializeTest(Compression::type codec_type,
                         Compression::type expected_codec_type) {
    auto sink = CreateOutputStream();
    auto gnode = std::static_pointer_cast<GroupNode>(this->node_);

    WriterProperties::Builder prop_builder;

    for (int i = 0; i < num_columns_; ++i) {
      prop_builder.compression(this->schema_.Column(i)->name(), codec_type);
    }
    std::shared_ptr<WriterProperties> writer_properties = prop_builder.build();

    auto file_writer = ParquetFileWriter::Open(sink, gnode, writer_properties);
    this->GenerateData(rows_per_rowgroup_);
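    // First half of the row groups: write one full column at a time through
    // the sequential NextColumn() API.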
    for (int rg = 0; rg < num_rowgroups_ / 2; ++rg) {
      RowGroupWriter* row_group_writer = file_writer->AppendRowGroup();
      for (int col = 0; col < num_columns_; ++col) {
        auto column_writer =
            static_cast<TypedColumnWriter<TestType>*>(row_group_writer->NextColumn());
        column_writer->WriteBatch(rows_per_rowgroup_, this->def_levels_.data(), nullptr,
                                  this->values_ptr_);
        column_writer->Close();
        // Ensure the column() API, which is specific to BufferedRowGroup, cannot
        // be called.
        ASSERT_THROW(row_group_writer->column(col), ParquetException);
      }

      row_group_writer->Close();
    }
    // Second half of the row groups: write in batches through the buffered
    // column(i) API.
    for (int rg = 0; rg < num_rowgroups_ / 2; ++rg) {
      RowGroupWriter* row_group_writer = file_writer->AppendBufferedRowGroup();
      for (int batch = 0; batch < (rows_per_rowgroup_ / rows_per_batch_); ++batch) {
        for (int col = 0; col < num_columns_; ++col) {
          auto column_writer =
              static_cast<TypedColumnWriter<TestType>*>(row_group_writer->column(col));
          column_writer->WriteBatch(
              rows_per_batch_, this->def_levels_.data() + (batch * rows_per_batch_),
              nullptr, this->values_ptr_ + (batch * rows_per_batch_));
          // Ensure the NextColumn() API, which is specific to RowGroup, cannot
          // be called.
          ASSERT_THROW(row_group_writer->NextColumn(), ParquetException);
        }
      }
      for (int col = 0; col < num_columns_; ++col) {
        auto column_writer =
            static_cast<TypedColumnWriter<TestType>*>(row_group_writer->column(col));
        column_writer->Close();
      }
      row_group_writer->Close();
    }
    file_writer->Close();

    PARQUET_ASSIGN_OR_THROW(auto buffer, sink->Finish());

    const int num_rows = num_rowgroups_ * rows_per_rowgroup_;

    auto source = std::make_shared<::arrow::io::BufferReader>(buffer);
    auto file_reader = ParquetFileReader::Open(source);
    ASSERT_EQ(num_columns_, file_reader->metadata()->num_columns());
    ASSERT_EQ(num_rowgroups_, file_reader->metadata()->num_row_groups());
    ASSERT_EQ(num_rows, file_reader->metadata()->num_rows());

    for (int rg = 0; rg < num_rowgroups_; ++rg) {
      auto rg_reader = file_reader->RowGroup(rg);
      auto rg_metadata = rg_reader->metadata();
      ASSERT_EQ(num_columns_, rg_metadata->num_columns());
      ASSERT_EQ(rows_per_rowgroup_, rg_metadata->num_rows());
      // Check that the specified compression was actually used.
      ASSERT_EQ(expected_codec_type, rg_metadata->ColumnChunk(0)->compression());

      const int64_t total_byte_size = rg_metadata->total_byte_size();
      const int64_t total_compressed_size = rg_metadata->total_compressed_size();
      if (expected_codec_type == Compression::UNCOMPRESSED) {
        ASSERT_EQ(total_byte_size, total_compressed_size);
      } else {
        ASSERT_NE(total_byte_size, total_compressed_size);
      }

      int64_t total_column_byte_size = 0;
      int64_t total_column_compressed_size = 0;

      for (int i = 0; i < num_columns_; ++i) {
        int64_t values_read;
        ASSERT_FALSE(rg_metadata->ColumnChunk(i)->has_index_page());
        total_column_byte_size += rg_metadata->ColumnChunk(i)->total_uncompressed_size();
        total_column_compressed_size +=
            rg_metadata->ColumnChunk(i)->total_compressed_size();

        std::vector<int16_t> def_levels_out(rows_per_rowgroup_);
        std::vector<int16_t> rep_levels_out(rows_per_rowgroup_);
        auto col_reader =
            std::static_pointer_cast<TypedColumnReader<TestType>>(rg_reader->Column(i));
        this->SetupValuesOut(rows_per_rowgroup_);
        col_reader->ReadBatch(rows_per_rowgroup_, def_levels_out.data(),
                              rep_levels_out.data(), this->values_out_ptr_, &values_read);
        this->SyncValuesOut();
        ASSERT_EQ(rows_per_rowgroup_, values_read);
        ASSERT_EQ(this->values_, this->values_out_);
        ASSERT_EQ(this->def_levels_, def_levels_out);
      }

      ASSERT_EQ(total_byte_size, total_column_byte_size);
      ASSERT_EQ(total_compressed_size, total_column_compressed_size);
    }
  }

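  // The next two helpers write rows_per_column[col] values to column col of a
  // single row group (sequential and buffered APIs, respectively); the writer
  // is expected to throw when the columns disagree on the number of rows.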
  void UnequalNumRows(int64_t max_rows, const std::vector<int64_t>& rows_per_column) {
    auto sink = CreateOutputStream();
    auto gnode = std::static_pointer_cast<GroupNode>(this->node_);

    std::shared_ptr<WriterProperties> props = WriterProperties::Builder().build();

    auto file_writer = ParquetFileWriter::Open(sink, gnode, props);

    RowGroupWriter* row_group_writer = file_writer->AppendRowGroup();

    this->GenerateData(max_rows);
    for (int col = 0; col < num_columns_; ++col) {
      auto column_writer =
          static_cast<TypedColumnWriter<TestType>*>(row_group_writer->NextColumn());
      column_writer->WriteBatch(rows_per_column[col], this->def_levels_.data(), nullptr,
                                this->values_ptr_);
      column_writer->Close();
    }
    row_group_writer->Close();
    file_writer->Close();
  }

  void UnequalNumRowsBuffered(int64_t max_rows,
                              const std::vector<int64_t>& rows_per_column) {
    auto sink = CreateOutputStream();
    auto gnode = std::static_pointer_cast<GroupNode>(this->node_);

    std::shared_ptr<WriterProperties> props = WriterProperties::Builder().build();

    auto file_writer = ParquetFileWriter::Open(sink, gnode, props);

    RowGroupWriter* row_group_writer = file_writer->AppendBufferedRowGroup();

    this->GenerateData(max_rows);
    for (int col = 0; col < num_columns_; ++col) {
      auto column_writer =
          static_cast<TypedColumnWriter<TestType>*>(row_group_writer->column(col));
      column_writer->WriteBatch(rows_per_column[col], this->def_levels_.data(), nullptr,
                                this->values_ptr_);
      column_writer->Close();
    }
    row_group_writer->Close();
    file_writer->Close();
  }

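  // With a REPEATED field, a repetition level of 1 continues the previous
  // row, so the two columns written below end up with different row counts
  // and closing the writer is expected to throw (see RepeatedTooFewRows).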
  void RepeatedUnequalRows() {
    // Use an optional and repeated schema, so both definition and repetition
    // levels are needed.
    this->SetUpSchema(Repetition::REPEATED);

    const int kNumRows = 100;
    this->GenerateData(kNumRows);

    auto sink = CreateOutputStream();
    auto gnode = std::static_pointer_cast<GroupNode>(this->node_);
    std::shared_ptr<WriterProperties> props = WriterProperties::Builder().build();
    auto file_writer = ParquetFileWriter::Open(sink, gnode, props);

    RowGroupWriter* row_group_writer = file_writer->AppendRowGroup();

    std::vector<int16_t> definition_levels(kNumRows, 1);
    std::vector<int16_t> repetition_levels(kNumRows, 0);

    {
      auto column_writer =
          static_cast<TypedColumnWriter<TestType>*>(row_group_writer->NextColumn());
      column_writer->WriteBatch(kNumRows, definition_levels.data(),
                                repetition_levels.data(), this->values_ptr_);
      column_writer->Close();
    }

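    // Mutate the levels for the second column: entry 1 becomes null and entry
    // 3 becomes a continuation of the previous row (repetition level 1), so
    // this column carries fewer rows than the first one.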
    definition_levels[1] = 0;
    repetition_levels[3] = 1;

    {
      auto column_writer =
          static_cast<TypedColumnWriter<TestType>*>(row_group_writer->NextColumn());
      column_writer->WriteBatch(kNumRows, definition_levels.data(),
                                repetition_levels.data(), this->values_ptr_);
      column_writer->Close();
    }
  }

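  // Closing never-written column writers must still yield a valid zero-row
  // row group, for both the sequential and the buffered APIs.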
  void ZeroRowsRowGroup() {
    auto sink = CreateOutputStream();
    auto gnode = std::static_pointer_cast<GroupNode>(this->node_);

    std::shared_ptr<WriterProperties> props = WriterProperties::Builder().build();

    auto file_writer = ParquetFileWriter::Open(sink, gnode, props);

    RowGroupWriter* row_group_writer;

    row_group_writer = file_writer->AppendRowGroup();
    for (int col = 0; col < num_columns_; ++col) {
      auto column_writer =
          static_cast<TypedColumnWriter<TestType>*>(row_group_writer->NextColumn());
      column_writer->Close();
    }
    row_group_writer->Close();

    row_group_writer = file_writer->AppendBufferedRowGroup();
    for (int col = 0; col < num_columns_; ++col) {
      auto column_writer =
          static_cast<TypedColumnWriter<TestType>*>(row_group_writer->column(col));
      column_writer->Close();
    }
    row_group_writer->Close();

    file_writer->Close();
  }
};

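// Instantiate the fixture once for every physical Parquet type.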
using TestTypes = ::testing::Types<Int32Type, Int64Type, Int96Type, FloatType,
                                   DoubleType, BooleanType, ByteArrayType, FLBAType>;

TYPED_TEST_SUITE(TestSerialize, TestTypes);

TYPED_TEST(TestSerialize, SmallFileUncompressed) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::UNCOMPRESSED));
}

TYPED_TEST(TestSerialize, TooFewRows) {
  std::vector<int64_t> num_rows = {100, 100, 100, 99};
  ASSERT_THROW(this->UnequalNumRows(100, num_rows), ParquetException);
  ASSERT_THROW(this->UnequalNumRowsBuffered(100, num_rows), ParquetException);
}

TYPED_TEST(TestSerialize, TooManyRows) {
  std::vector<int64_t> num_rows = {100, 100, 100, 101};
  ASSERT_THROW(this->UnequalNumRows(101, num_rows), ParquetException);
  ASSERT_THROW(this->UnequalNumRowsBuffered(101, num_rows), ParquetException);
}

TYPED_TEST(TestSerialize, ZeroRows) { ASSERT_NO_THROW(this->ZeroRowsRowGroup()); }

TYPED_TEST(TestSerialize, RepeatedTooFewRows) {
  ASSERT_THROW(this->RepeatedUnequalRows(), ParquetException);
}

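// The codec round-trip tests below are compiled only when the corresponding
// compression library is enabled in the Arrow build.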
#ifdef ARROW_WITH_SNAPPY
TYPED_TEST(TestSerialize, SmallFileSnappy) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::SNAPPY));
}
#endif

#ifdef ARROW_WITH_BROTLI
TYPED_TEST(TestSerialize, SmallFileBrotli) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::BROTLI));
}
#endif

#ifdef ARROW_WITH_GZIP
TYPED_TEST(TestSerialize, SmallFileGzip) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::GZIP));
}
#endif

#ifdef ARROW_WITH_LZ4
TYPED_TEST(TestSerialize, SmallFileLz4) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::LZ4));
}

TYPED_TEST(TestSerialize, SmallFileLz4Hadoop) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::LZ4_HADOOP));
}
#endif

#ifdef ARROW_WITH_ZSTD
TYPED_TEST(TestSerialize, SmallFileZstd) {
  ASSERT_NO_FATAL_FAILURE(this->FileSerializeTest(Compression::ZSTD));
}
#endif

TEST(TestBufferedRowGroupWriter, DisabledDictionary) {
  // Regression test for PARQUET-1706: wrong dictionary_page_offset when only
  // data pages are written via BufferedPageWriter.
  auto sink = CreateOutputStream();
  auto writer_props = parquet::WriterProperties::Builder().disable_dictionary()->build();
  schema::NodeVector fields;
  fields.push_back(
      PrimitiveNode::Make("col", parquet::Repetition::REQUIRED, parquet::Type::INT32));
  auto schema = std::static_pointer_cast<GroupNode>(
      GroupNode::Make("schema", Repetition::REQUIRED, fields));
  auto file_writer = parquet::ParquetFileWriter::Open(sink, schema, writer_props);
  auto rg_writer = file_writer->AppendBufferedRowGroup();
  auto col_writer = static_cast<Int32Writer*>(rg_writer->column(0));
  int32_t value = 0;
  col_writer->WriteBatch(1, nullptr, nullptr, &value);
  rg_writer->Close();
  file_writer->Close();
  PARQUET_ASSIGN_OR_THROW(auto buffer, sink->Finish());

  auto source = std::make_shared<::arrow::io::BufferReader>(buffer);
  auto file_reader = ParquetFileReader::Open(source);
  ASSERT_EQ(1, file_reader->metadata()->num_row_groups());
  auto rg_reader = file_reader->RowGroup(0);
  ASSERT_EQ(1, rg_reader->metadata()->num_columns());
  ASSERT_EQ(1, rg_reader->metadata()->num_rows());
  ASSERT_FALSE(rg_reader->metadata()->ColumnChunk(0)->has_dictionary_page());
}

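// With dictionary encoding disabled and a small page size, 10000 values span
// several data pages; verify that the buffered writer round-trips all of them.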
TEST(TestBufferedRowGroupWriter, MultiPageDisabledDictionary) {
  constexpr int kValueCount = 10000;
  constexpr int kPageSize = 16384;
  auto sink = CreateOutputStream();
  auto writer_props = parquet::WriterProperties::Builder()
                          .disable_dictionary()
                          ->data_pagesize(kPageSize)
                          ->build();
  schema::NodeVector fields;
  fields.push_back(
      PrimitiveNode::Make("col", parquet::Repetition::REQUIRED, parquet::Type::INT32));
  auto schema = std::static_pointer_cast<GroupNode>(
      GroupNode::Make("schema", Repetition::REQUIRED, fields));
  auto file_writer = parquet::ParquetFileWriter::Open(sink, schema, writer_props);
  auto rg_writer = file_writer->AppendBufferedRowGroup();
  auto col_writer = static_cast<Int32Writer*>(rg_writer->column(0));
  std::vector<int32_t> values_in;
  for (int i = 0; i < kValueCount; ++i) {
    values_in.push_back((i % 100) + 1);
  }
  col_writer->WriteBatch(kValueCount, nullptr, nullptr, values_in.data());
  rg_writer->Close();
  file_writer->Close();
  PARQUET_ASSIGN_OR_THROW(auto buffer, sink->Finish());

  auto source = std::make_shared<::arrow::io::BufferReader>(buffer);
  auto file_reader = ParquetFileReader::Open(source);
  auto file_metadata = file_reader->metadata();
  ASSERT_EQ(1, file_metadata->num_row_groups());
  std::vector<int32_t> values_out(kValueCount);
  for (int r = 0; r < file_metadata->num_row_groups(); ++r) {
    auto rg_reader = file_reader->RowGroup(r);
    ASSERT_EQ(1, rg_reader->metadata()->num_columns());
    ASSERT_EQ(kValueCount, rg_reader->metadata()->num_rows());
    int64_t total_values_read = 0;
    std::shared_ptr<parquet::ColumnReader> col_reader;
    ASSERT_NO_THROW(col_reader = rg_reader->Column(0));
    parquet::Int32Reader* int32_reader =
        static_cast<parquet::Int32Reader*>(col_reader.get());
    int64_t values_left = kValueCount;
    int32_t* out = values_out.data();
    while (int32_reader->HasNext()) {
      int64_t values_read;
      int32_reader->ReadBatch(values_left, nullptr, nullptr, out, &values_read);
      values_left -= values_read;
      out += values_read;
      total_values_read += values_read;
    }
    ASSERT_EQ(kValueCount, total_values_read);
    ASSERT_EQ(values_in, values_out);
  }
}

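// Round-trip of an all-null column: every definition level is 0, so no values
// are consumed on write and none are produced on read.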
TEST(ParquetRoundtrip, AllNulls) {
  auto primitive_node =
      PrimitiveNode::Make("nulls", Repetition::OPTIONAL, nullptr, Type::INT32);
  schema::NodeVector columns({primitive_node});

  auto root_node = GroupNode::Make("root", Repetition::REQUIRED, columns, nullptr);

  auto sink = CreateOutputStream();

  auto file_writer =
      ParquetFileWriter::Open(sink, std::static_pointer_cast<GroupNode>(root_node));
  auto row_group_writer = file_writer->AppendRowGroup();
  auto column_writer = static_cast<Int32Writer*>(row_group_writer->NextColumn());

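  // Every entry is null (definition level 0), so the uninitialized values
  // buffer is never read by WriteBatch.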
  int32_t values[3];
  int16_t def_levels[] = {0, 0, 0};

  column_writer->WriteBatch(3, def_levels, nullptr, values);

  column_writer->Close();
  row_group_writer->Close();
  file_writer->Close();

  ReaderProperties props = default_reader_properties();
  props.enable_buffered_stream();
  PARQUET_ASSIGN_OR_THROW(auto buffer, sink->Finish());

  auto source = std::make_shared<::arrow::io::BufferReader>(buffer);
  auto file_reader = ParquetFileReader::Open(source, props);
  auto row_group_reader = file_reader->RowGroup(0);
  auto column_reader = std::static_pointer_cast<Int32Reader>(row_group_reader->Column(0));

  int64_t values_read;
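  // Poison the level buffer to make sure ReadBatch actually overwrites it.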
  def_levels[0] = -1;
  def_levels[1] = -1;
  def_levels[2] = -1;
  column_reader->ReadBatch(3, def_levels, nullptr, values, &values_read);
  EXPECT_THAT(def_levels, ElementsAre(0, 0, 0));
}

}  // namespace test

}  // namespace parquet