#!/usr/local/bin/thrift -java

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#
# Thrift Service that the MetaStore is built on
#

include "fb303.thrift"

namespace java org.apache.hadoop.hive.metastore.api
namespace php metastore
namespace cpp Apache.Hadoop.Hive

const string DDL_TIME = "transient_lastDdlTime"

struct Version {
  1: string version,
  2: string comments
}

struct FieldSchema {
  1: string name,    // name of the field
  2: string type,    // type of the field. primitive types defined above, specify list<TYPE_NAME>, map<TYPE_NAME, TYPE_NAME> for lists & maps
  3: string comment
}

struct Type {
  1: string name,                       // one of the types in PrimitiveTypes or CollectionTypes or User defined types
  2: optional string type1,             // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
  3: optional string type2,             // val type if the name is 'map' (MAP_TYPE)
  4: optional list<FieldSchema> fields  // if the name is one of the user defined types
}
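
// Illustrative example (editor's note, not part of the upstream IDL): a column
// of type map<string,int> could be described as Type{name="map",
// type1="string", type2="int"}; a user-defined struct type would instead set
// name to the type's name and populate fields with its member FieldSchemas.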

enum HiveObjectType {
  GLOBAL = 1,
  DATABASE = 2,
  TABLE = 3,
  PARTITION = 4,
  COLUMN = 5,
}

enum PrincipalType {
  USER = 1,
  ROLE = 2,
  GROUP = 3,
}

const string HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__"
const string HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__"
const string HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__"

enum PartitionEventType {
  LOAD_DONE = 1,
}

// Enums for transaction and lock management
enum TxnState {
  COMMITTED = 1,
  ABORTED = 2,
  OPEN = 3,
}

enum LockLevel {
  DB = 1,
  TABLE = 2,
  PARTITION = 3,
}

enum LockState {
  ACQUIRED = 1,      // requester has the lock
  WAITING = 2,       // requester is waiting for the lock and should call checklock at a later point to see if the lock has been obtained.
  ABORT = 3,         // the lock has been aborted, most likely due to timeout
  NOT_ACQUIRED = 4,  // returned only with lockNoWait, indicates the lock was not available and was not acquired
}

enum LockType {
  SHARED_READ = 1,
  SHARED_WRITE = 2,
  EXCLUSIVE = 3,
}

enum CompactionType {
  MINOR = 1,
  MAJOR = 2,
}

enum GrantRevokeType {
  GRANT = 1,
  REVOKE = 2,
}

struct HiveObjectRef {
  1: HiveObjectType objectType,
  2: string dbName,
  3: string objectName,
  4: list<string> partValues,
  5: string columnName,
}

struct PrivilegeGrantInfo {
  1: string privilege,
  2: i32 createTime,
  3: string grantor,
  4: PrincipalType grantorType,
  5: bool grantOption,
}

struct HiveObjectPrivilege {
  1: HiveObjectRef hiveObject,
  2: string principalName,
  3: PrincipalType principalType,
  4: PrivilegeGrantInfo grantInfo,
}

struct PrivilegeBag {
  1: list<HiveObjectPrivilege> privileges,
}

struct PrincipalPrivilegeSet {
  1: map<string, list<PrivilegeGrantInfo>> userPrivileges,   // user name -> privilege grant info
  2: map<string, list<PrivilegeGrantInfo>> groupPrivileges,  // group name -> privilege grant info
  3: map<string, list<PrivilegeGrantInfo>> rolePrivileges,   // role name -> privilege grant info
}

struct GrantRevokePrivilegeRequest {
  1: GrantRevokeType requestType;
  2: PrivilegeBag privileges;
  3: optional bool revokeGrantOption;  // Only for revoke request
}

struct GrantRevokePrivilegeResponse {
  1: optional bool success;
}

struct Role {
  1: string roleName,
  2: i32 createTime,
  3: string ownerName,
}

// Representation of a grant for a principal to a role
struct RolePrincipalGrant {
  1: string roleName,
  2: string principalName,
  3: PrincipalType principalType,
  4: bool grantOption,
  5: i32 grantTime,
  6: string grantorName,
  7: PrincipalType grantorPrincipalType
}

struct GetRoleGrantsForPrincipalRequest {
  1: required string principal_name,
  2: required PrincipalType principal_type
}

struct GetRoleGrantsForPrincipalResponse {
  1: required list<RolePrincipalGrant> principalGrants;
}

struct GetPrincipalsInRoleRequest {
  1: required string roleName;
}

struct GetPrincipalsInRoleResponse {
  1: required list<RolePrincipalGrant> principalGrants;
}

struct GrantRevokeRoleRequest {
  1: GrantRevokeType requestType;
  2: string roleName;
  3: string principalName;
  4: PrincipalType principalType;
  5: optional string grantor;             // Needed for grant
  6: optional PrincipalType grantorType;  // Needed for grant
  7: optional bool grantOption;
}

struct GrantRevokeRoleResponse {
  1: optional bool success;
}

// namespace for tables
struct Database {
  1: string name,
  2: string description,
  3: string locationUri,
  4: map<string, string> parameters,  // properties associated with the database
  5: optional PrincipalPrivilegeSet privileges,
  6: optional string ownerName,
  7: optional PrincipalType ownerType
}

// This object holds the information needed by SerDes
struct SerDeInfo {
  1: string name,                    // name of the serde, table name by default
  2: string serializationLib,        // usually the class that implements the extractor & loader
  3: map<string, string> parameters  // initialization parameters
}

// sort order of a column (column name along with asc(1)/desc(0))
struct Order {
  1: string col,  // sort column name
  2: i32 order    // asc(1) or desc(0)
}
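
// Illustrative example (editor's note, not part of the upstream IDL):
// Order{col="ds", order=1} requests that rows within each bucket be sorted by
// the "ds" column in ascending order.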

// this object holds all the information about skewed table
struct SkewedInfo {
  1: list<string> skewedColNames,         // skewed column names
  2: list<list<string>> skewedColValues,  // skewed values
  3: map<list<string>, string> skewedColValueLocationMaps,  // skewed value to location mappings
}

// this object holds all the information about physical storage of the data belonging to a table
struct StorageDescriptor {
  1: list<FieldSchema> cols,   // required (refer to types defined above)
  2: string location,          // defaults to <warehouse loc>/<db loc>/tablename
  3: string inputFormat,       // SequenceFileInputFormat (binary) or TextInputFormat or custom format
  4: string outputFormat,      // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format
  5: bool compressed,          // compressed or not
  6: i32 numBuckets,           // this must be specified if there are any dimension columns
  7: SerDeInfo serdeInfo,      // serialization and deserialization information
  8: list<string> bucketCols,  // reducer grouping columns, clustering columns and bucketing columns
  9: list<Order> sortCols,     // sort order of the data in each bucket
  10: map<string, string> parameters,       // any user supplied key value hash
  11: optional SkewedInfo skewedInfo,       // skewed information
  12: optional bool storedAsSubDirectories  // stored as subdirectories or not
}
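
// Illustrative sketch (editor's note, not part of the upstream IDL): a minimal
// descriptor for an uncompressed plain-text table might use
//   inputFormat  = "org.apache.hadoop.mapred.TextInputFormat"
//   outputFormat = "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
//   serdeInfo.serializationLib = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"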

// table information
struct Table {
  1: string tableName,                 // name of the table
  2: string dbName,                    // database name ('default')
  3: string owner,                     // owner of this table
  4: i32 createTime,                   // creation time of the table
  5: i32 lastAccessTime,               // last access time (usually this will be filled from HDFS and shouldn't be relied on)
  6: i32 retention,                    // retention time
  7: StorageDescriptor sd,             // storage descriptor of the table
  8: list<FieldSchema> partitionKeys,  // partition keys of the table. only primitive types are supported
  9: map<string, string> parameters,   // to store comments or any other user level parameters
  10: string viewOriginalText,         // original view text, null for non-view
  11: string viewExpandedText,         // expanded view text, null for non-view
  12: string tableType,                // table type enum, e.g. EXTERNAL_TABLE
  13: optional PrincipalPrivilegeSet privileges,
  14: optional bool temporary=false
}

struct Partition {
  1: list<string> values,  // string value is converted to appropriate partition key type
  2: string dbName,
  3: string tableName,
  4: i32 createTime,
  5: i32 lastAccessTime,
  6: StorageDescriptor sd,
  7: map<string, string> parameters,
  8: optional PrincipalPrivilegeSet privileges
}
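
// Illustrative example (editor's note, not part of the upstream IDL): for a
// table partitioned by (ds string, country string), the partition
// ds=2008-08-08/country=us carries values = ["2008-08-08", "us"]; the order of
// values matches the order of the table's partitionKeys.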

struct PartitionWithoutSD {
  1: list<string> values,  // string value is converted to appropriate partition key type
  2: i32 createTime,
  3: i32 lastAccessTime,
  4: string relativePath,
  5: map<string, string> parameters,
  6: optional PrincipalPrivilegeSet privileges
}

struct PartitionSpecWithSharedSD {
  1: list<PartitionWithoutSD> partitions,
  2: StorageDescriptor sd,
}

struct PartitionListComposingSpec {
  1: list<Partition> partitions
}

struct PartitionSpec {
  1: string dbName,
  2: string tableName,
  3: string rootPath,
  4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
  5: optional PartitionListComposingSpec partitionList
}
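
// Illustrative note (editor's note, not part of the upstream IDL): a
// PartitionSpec is expected to carry its partitions in exactly one of the two
// optional forms above. When many partitions share one StorageDescriptor,
// sharedSDPartitionSpec stores that descriptor once plus a relativePath per
// partition; otherwise partitionList composes full Partition objects.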

struct Index {
  1: string indexName,          // unique within the whole database namespace
  2: string indexHandlerClass,  // reserved
  3: string dbName,
  4: string origTableName,
  5: i32 createTime,
  6: i32 lastAccessTime,
  7: string indexTableName,
  8: StorageDescriptor sd,
  9: map<string, string> parameters,
  10: bool deferredRebuild
}

// column statistics
struct BooleanColumnStatsData {
  1: required i64 numTrues,
  2: required i64 numFalses,
  3: required i64 numNulls
}

struct DoubleColumnStatsData {
  1: optional double lowValue,
  2: optional double highValue,
  3: required i64 numNulls,
  4: required i64 numDVs
}

struct LongColumnStatsData {
  1: optional i64 lowValue,
  2: optional i64 highValue,
  3: required i64 numNulls,
  4: required i64 numDVs
}

struct StringColumnStatsData {
  1: required i64 maxColLen,
  2: required double avgColLen,
  3: required i64 numNulls,
  4: required i64 numDVs
}

struct BinaryColumnStatsData {
  1: required i64 maxColLen,
  2: required double avgColLen,
  3: required i64 numNulls
}

struct Decimal {
  1: required binary unscaled,
  3: required i16 scale
}
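
// Illustrative example (editor's note, not part of the upstream IDL): the
// value 12.34 would be encoded with unscaled holding the two's-complement
// big-endian bytes of the integer 1234 and scale = 2, i.e.
// value = unscaled * 10^(-scale).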

struct DecimalColumnStatsData {
  1: optional Decimal lowValue,
  2: optional Decimal highValue,
  3: required i64 numNulls,
  4: required i64 numDVs
}

union ColumnStatisticsData {
  1: BooleanColumnStatsData booleanStats,
  2: LongColumnStatsData longStats,
  3: DoubleColumnStatsData doubleStats,
  4: StringColumnStatsData stringStats,
  5: BinaryColumnStatsData binaryStats,
  6: DecimalColumnStatsData decimalStats
}

struct ColumnStatisticsObj {
  1: required string colName,
  2: required string colType,
  3: required ColumnStatisticsData statsData
}

struct ColumnStatisticsDesc {
  1: required bool isTblLevel,
  2: required string dbName,
  3: required string tableName,
  4: optional string partName,
  5: optional i64 lastAnalyzed
}

struct ColumnStatistics {
  1: required ColumnStatisticsDesc statsDesc,
  2: required list<ColumnStatisticsObj> statsObj;
}

struct AggrStats {
  1: required list<ColumnStatisticsObj> colStats,
  2: required i64 partsFound  // number of partitions for which stats were found
}

struct SetPartitionsStatsRequest {
  1: required list<ColumnStatistics> colStats
}

// schema of the table/query results etc.
struct Schema {
  // column names, types, comments
  1: list<FieldSchema> fieldSchemas,  // delimiters etc
  2: map<string, string> properties
}

// Key-value store to be used with selected
// Metastore APIs (create, alter methods).
// The client can pass environment properties / configs that can be
// accessed in hooks.
struct EnvironmentContext {
  1: map<string, string> properties
}

// Return type for get_partitions_by_expr
struct PartitionsByExprResult {
  1: required list<Partition> partitions,
  // Whether the results have any (currently, all) partitions which may or may not match
  2: required bool hasUnknownPartitions
}

struct PartitionsByExprRequest {
  1: required string dbName,
  2: required string tblName,
  3: required binary expr,
  4: optional string defaultPartitionName,
  5: optional i16 maxParts=-1
}

struct TableStatsResult {
  1: required list<ColumnStatisticsObj> tableStats
}

struct PartitionsStatsResult {
  1: required map<string, list<ColumnStatisticsObj>> partStats
}

struct TableStatsRequest {
  1: required string dbName,
  2: required string tblName,
  3: required list<string> colNames
}

struct PartitionsStatsRequest {
  1: required string dbName,
  2: required string tblName,
  3: required list<string> colNames,
  4: required list<string> partNames
}

// Return type for add_partitions_req
struct AddPartitionsResult {
  1: optional list<Partition> partitions,
}

// Request type for add_partitions_req
struct AddPartitionsRequest {
  1: required string dbName,
  2: required string tblName,
  3: required list<Partition> parts,
  4: required bool ifNotExists,
  5: optional bool needResult=true
}

// Return type for drop_partitions_req
struct DropPartitionsResult {
  1: optional list<Partition> partitions,
}

struct DropPartitionsExpr {
  1: required binary expr;
  2: optional i32 partArchiveLevel;
}

union RequestPartsSpec {
  1: list<string> names;
  2: list<DropPartitionsExpr> exprs;
}

// Request type for drop_partitions_req
// TODO: we might want to add "bestEffort" flag; where a subset can fail
struct DropPartitionsRequest {
  1: required string dbName,
  2: required string tblName,
  3: required RequestPartsSpec parts,
  4: optional bool deleteData,
  5: optional bool ifExists=true,  // currently verified on client
  6: optional bool ignoreProtection,
  7: optional EnvironmentContext environmentContext,
  8: optional bool needResult=true
}

enum FunctionType {
  JAVA = 1,
}

enum ResourceType {
  JAR = 1,
  FILE = 2,
  ARCHIVE = 3,
}

struct ResourceUri {
  1: ResourceType resourceType,
  2: string uri,
}

// User-defined function
struct Function {
  1: string functionName,
  2: string dbName,
  3: string className,
  4: string ownerName,
  5: PrincipalType ownerType,
  6: i32 createTime,
  7: FunctionType functionType,
  8: list<ResourceUri> resourceUris,
}

// Structs for transaction and locks
struct TxnInfo {
  1: required i64 id,
  2: required TxnState state,
  3: required string user,      // used in 'show transactions' to help admins find who has open transactions
  4: required string hostname,  // used in 'show transactions' to help admins find who has open transactions
}

struct GetOpenTxnsInfoResponse {
  1: required i64 txn_high_water_mark,
  2: required list<TxnInfo> open_txns,
}

struct GetOpenTxnsResponse {
  1: required i64 txn_high_water_mark,
  2: required set<i64> open_txns,
}

struct OpenTxnRequest {
  1: required i32 num_txns,
  2: required string user,
  3: required string hostname,
}

struct OpenTxnsResponse {
  1: required list<i64> txn_ids,
}

struct AbortTxnRequest {
  1: required i64 txnid,
}

struct CommitTxnRequest {
  1: required i64 txnid,
}

struct LockComponent {
  1: required LockType type,
  2: required LockLevel level,
  3: required string dbname,
  4: optional string tablename,
  5: optional string partitionname,
}

struct LockRequest {
  1: required list<LockComponent> component,
  2: optional i64 txnid,
  3: required string user,      // used in 'show locks' to help admins find who has open locks
  4: required string hostname,  // used in 'show locks' to help admins find who has open locks
}

struct LockResponse {
  1: required i64 lockid,
  2: required LockState state,
}

struct CheckLockRequest {
  1: required i64 lockid,
}

struct UnlockRequest {
  1: required i64 lockid,
}

struct ShowLocksRequest {
}

struct ShowLocksResponseElement {
  1: required i64 lockid,
  2: required string dbname,
  3: optional string tablename,
  4: optional string partname,
  5: required LockState state,
  6: required LockType type,
  7: optional i64 txnid,
  8: required i64 lastheartbeat,
  9: optional i64 acquiredat,
  10: required string user,
  11: required string hostname,
}

struct ShowLocksResponse {
  1: list<ShowLocksResponseElement> locks,
}

struct HeartbeatRequest {
  1: optional i64 lockid,
  2: optional i64 txnid
}

struct HeartbeatTxnRangeRequest {
  1: required i64 min,
  2: required i64 max
}

struct HeartbeatTxnRangeResponse {
  1: required set<i64> aborted,
  2: required set<i64> nosuch
}

struct CompactionRequest {
  1: required string dbname,
  2: required string tablename,
  3: optional string partitionname,
  4: required CompactionType type,
  5: optional string runas,
}

struct ShowCompactRequest {
}

struct ShowCompactResponseElement {
  1: required string dbname,
  2: required string tablename,
  3: optional string partitionname,
  4: required CompactionType type,
  5: required string state,
  6: optional string workerid,
  7: optional i64 start,
  8: optional string runAs,
}

struct ShowCompactResponse {
  1: required list<ShowCompactResponseElement> compacts,
}

struct NotificationEventRequest {
  1: required i64 lastEvent,
  2: optional i32 maxEvents,
}

struct NotificationEvent {
  1: required i64 eventId,
  2: required i32 eventTime,
  3: required string eventType,
  4: optional string dbName,
  5: optional string tableName,
  6: required string message,
}

struct NotificationEventResponse {
  1: required list<NotificationEvent> events,
}

struct CurrentNotificationEventId {
  1: required i64 eventId,
}

struct InsertEventRequestData {
  1: required list<string> filesAdded
}

union FireEventRequestData {
  1: InsertEventRequestData insertData
}

struct FireEventRequest {
  1: required bool successful,
  2: required FireEventRequestData data,
  // dbname, tablename, and partition vals are included as optional in the top level event rather than placed in each type of
  // subevent as I assume they'll be used across most event types.
  3: optional string dbName,
  4: optional string tableName,
  5: optional list<string> partitionVals,
}

struct FireEventResponse {
  // NOP for now, this is just a placeholder for future responses
}

struct GetAllFunctionsResponse {
  1: optional list<Function> functions
}

struct TableMeta {
  1: required string dbName;
  2: required string tableName;
  3: required string tableType;
  4: optional string comments;
}

exception MetaException {
  1: string message
}

exception UnknownTableException {
  1: string message
}

exception UnknownDBException {
  1: string message
}

exception AlreadyExistsException {
  1: string message
}

exception InvalidPartitionException {
  1: string message
}

exception UnknownPartitionException {
  1: string message
}

exception InvalidObjectException {
  1: string message
}

exception NoSuchObjectException {
  1: string message
}

exception IndexAlreadyExistsException {
  1: string message
}

exception InvalidOperationException {
  1: string message
}

exception ConfigValSecurityException {
  1: string message
}

exception InvalidInputException {
  1: string message
}

// Transaction and lock exceptions
exception NoSuchTxnException {
  1: string message
}

exception TxnAbortedException {
  1: string message
}

exception TxnOpenException {
  1: string message
}

exception NoSuchLockException {
  1: string message
}

/**
 * This interface is live.
 */
service ThriftHiveMetastore extends fb303.FacebookService
{
  string getMetaConf(1:string key) throws(1:MetaException o1)
  void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1)

  void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
  Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
  void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
  list<string> get_databases(1:string pattern) throws(1:MetaException o1)
  list<string> get_all_databases() throws(1:MetaException o1)
  void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)

  // returns the type with given name (make separate calls for the dependent types if needed)
  Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2)
  bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
  bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
  map<string, Type> get_type_all(1:string name)
      throws(1:MetaException o2)

  // Gets a list of FieldSchemas describing the columns of a particular table
  list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
  list<FieldSchema> get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)

  // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
  list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
  list<FieldSchema> get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)

  // create a Hive table. The following fields must be set:
  //   tableName
  //   database        (only 'default' for now until Hive QL supports databases)
  //   owner           (not needed, but good to have for tracking purposes)
  //   sd.cols         (list of field schemas)
  //   sd.inputFormat  (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat)
  //   sd.outputFormat (SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat)
  //   sd.serdeInfo.serializationLib (SerDe class name, e.g. org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe)
  // * See notes on DDL_TIME
  void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
  void create_table_with_environment_context(1:Table tbl,
      2:EnvironmentContext environment_context)
      throws (1:AlreadyExistsException o1,
              2:InvalidObjectException o2, 3:MetaException o3,
              4:NoSuchObjectException o4)
  // drops the table and all the partitions associated with it if the table has partitions
  // delete data (including partitions) if deleteData is set to true
  void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
      throws(1:NoSuchObjectException o1, 2:MetaException o3)
  void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData,
      4:EnvironmentContext environment_context)
      throws(1:NoSuchObjectException o1, 2:MetaException o3)
  list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
  list<TableMeta> get_table_meta(1: string db_patterns, 2: string tbl_patterns, 3: list<string> tbl_types)
      throws (1: MetaException o1)
  list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)

  Table get_table(1:string dbname, 2:string tbl_name)
      throws (1:MetaException o1, 2:NoSuchObjectException o2)
  list<Table> get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names)
      throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)

  // Get a list of table names that match a filter.
  // The filter operators are LIKE, <, <=, >, >=, =, <>
  //
  // In the filter statement, values interpreted as strings must be enclosed in quotes,
  // while values interpreted as integers should not be. Strings and integers are the only
  // supported value types.
  //
  // The currently supported key names in the filter are:
  //   Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
  //     and supports all filter operators
  //   Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
  //     and supports all filter operators except LIKE
  //   Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
  //     and only supports the filter operators = and <>.
  //     Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
  //     For example, to filter on a parameter key called "retention", the key name in the filter
  //     statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
  //     Also, = and <> only work for keys that exist in the tables. E.g., if you are looking
  //     for tables where key1 <> value, it will only look at tables that have a value for
  //     the parameter key1.
  // Some example filter statements include:
  //   filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
  //     Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
  //   filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
  //     Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\""
  // @param dbName
  //          The name of the database from which you will retrieve the table names
  // @param filter
  //          The filter string
  // @param max_tables
  //          The maximum number of tables returned
  // @return  A list of table names that match the desired filter
  list<string> get_table_names_by_filter(1:string dbname, 2:string filter, 3:i16 max_tables=-1)
      throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)

  // alter table applies only to future partitions, not to existing partitions
  // * See notes on DDL_TIME
  void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl)
      throws (1:InvalidOperationException o1, 2:MetaException o2)
  void alter_table_with_environment_context(1:string dbname, 2:string tbl_name,
      3:Table new_tbl, 4:EnvironmentContext environment_context)
      throws (1:InvalidOperationException o1, 2:MetaException o2)
  // alter table not only applies to future partitions but also cascades to existing partitions
  void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade)
      throws (1:InvalidOperationException o1, 2:MetaException o2)
  // the following applies only to tables that have partitions
  // * See notes on DDL_TIME
  Partition add_partition(1:Partition new_part)
      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  Partition add_partition_with_environment_context(1:Partition new_part,
      2:EnvironmentContext environment_context)
      throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2,
      3:MetaException o3)
  i32 add_partitions(1:list<Partition> new_parts)
      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  i32 add_partitions_pspec(1:list<PartitionSpec> new_parts)
      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  Partition append_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
      throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  AddPartitionsResult add_partitions_req(1:AddPartitionsRequest request)
      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  Partition append_partition_with_environment_context(1:string db_name, 2:string tbl_name,
      3:list<string> part_vals, 4:EnvironmentContext environment_context)
      throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name)
      throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  Partition append_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
      3:string part_name, 4:EnvironmentContext environment_context)
      throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  bool drop_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:bool deleteData)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  bool drop_partition_with_environment_context(1:string db_name, 2:string tbl_name,
      3:list<string> part_vals, 4:bool deleteData, 5:EnvironmentContext environment_context)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  bool drop_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
      3:string part_name, 4:bool deleteData, 5:EnvironmentContext environment_context)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  DropPartitionsResult drop_partitions_req(1: DropPartitionsRequest req)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)

  Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)
  Partition exchange_partition(1:map<string, string> partitionSpecs, 2:string source_db,
      3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
      throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
      4:InvalidInputException o4)

  Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
      4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)

  Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)

  // returns all the partitions for this table in reverse chronological order.
  // If max parts is given then it will return only that many.
  list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
      4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)

  list<PartitionSpec> get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)

  list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
      throws(1:MetaException o2)
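
  // Illustrative note (editor's note, not part of the upstream IDL): partition
  // names here are of the "key1=val1/key2=val2" form, e.g. "ds=2008-08-08/hr=12".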

  // get_partition*_ps methods allow filtering by a partial partition specification,
  // as needed for dynamic partitions. The values that are not restricted should
  // be empty strings. Nulls were considered (instead of "") but caused errors in
  // generated Python code. The size of part_vals may be smaller than the
  // number of partition columns - the unspecified values are considered the same
  // as "".
  list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name
      3:list<string> part_vals, 4:i16 max_parts=-1)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)
  list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
      5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)

  list<string> get_partition_names_ps(1:string db_name,
      2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)

  // get the partitions matching the given partition filter
  list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
      3:string filter, 4:i16 max_parts=-1)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)
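
  // Illustrative example (editor's note, not part of the upstream IDL): for a
  // table partitioned by (ds string, hr string), a filter string such as
  // ds = "2010-01-01" and hr > "10" could be passed here, subject to the
  // operator support described for table filters above.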

  // List partitions as PartitionSpec instances.
  list<PartitionSpec> get_part_specs_by_filter(1:string db_name 2:string tbl_name
      3:string filter, 4:i32 max_parts=-1)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)

  // get the partitions matching the given partition filter
  // unlike get_partitions_by_filter, takes serialized hive expression, and with that can work
  // with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL)
  PartitionsByExprResult get_partitions_by_expr(1:PartitionsByExprRequest req)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)

  // get partitions given a list of partition names
  list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)

  // changes the partition to the new partition object. partition is identified from the part values
  // in the new_part
  // * See notes on DDL_TIME
  void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
      throws (1:InvalidOperationException o1, 2:MetaException o2)

  // change a list of partitions. All partitions are altered atomically and all
  // prehooks are fired together followed by all post hooks
  void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
      throws (1:InvalidOperationException o1, 2:MetaException o2)

  void alter_partition_with_environment_context(1:string db_name,
      2:string tbl_name, 3:Partition new_part,
      4:EnvironmentContext environment_context)
      throws (1:InvalidOperationException o1, 2:MetaException o2)

  // rename the old partition to the new partition object by changing old part values to the part values
  // in the new_part. old partition is identified from part_vals.
  // partition keys in new_part should be the same as those in old partition.
  void rename_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part)
      throws (1:InvalidOperationException o1, 2:MetaException o2)

  // returns whether or not the partition name is valid based on the value of the config
  // hive.metastore.partition.name.whitelist.pattern
  bool partition_name_has_valid_characters(1:list<string> part_vals, 2:bool throw_exception)
      throws(1: MetaException o1)

  // gets the value of the configuration key in the metastore server. returns
  // defaultValue if the key does not exist. if the configuration key does not
  // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is
  // thrown.
  string get_config_value(1:string name, 2:string defaultValue)
      throws(1:ConfigValSecurityException o1)

  // converts a partition name into a partition values array
  list<string> partition_name_to_vals(1: string part_name)
      throws(1: MetaException o1)
  // converts a partition name into a partition specification (a mapping from
  // the partition cols to the values)
  map<string, string> partition_name_to_spec(1: string part_name)
      throws(1: MetaException o1)
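
  // Illustrative example (editor's note, not part of the upstream IDL):
  // partition_name_to_vals("ds=2008-08-08/hr=12") yields ["2008-08-08", "12"],
  // while partition_name_to_spec on the same name yields
  // {"ds": "2008-08-08", "hr": "12"}.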

  void markPartitionForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
      4:PartitionEventType eventType) throws (1: MetaException o1, 2: NoSuchObjectException o2,
      3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
      6: InvalidPartitionException o6)
  bool isPartitionMarkedForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
      4: PartitionEventType eventType) throws (1: MetaException o1, 2:NoSuchObjectException o2,
      3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
      6: InvalidPartitionException o6)

  // index
  Index add_index(1:Index new_index, 2: Table index_table)
      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx)
      throws (1:InvalidOperationException o1, 2:MetaException o2)
  bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)

  list<Index> get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
      throws(1:MetaException o2)

  // column statistics interfaces

  // update APIs persist the column statistics object(s) that are passed in. If statistics already
  // exist for one or more columns, the existing statistics will be overwritten. The update APIs
  // validate that the dbName, tableName, partName, colName[] passed in as part of the ColumnStatistics
  // struct are valid, and throw InvalidInputException/NoSuchObjectException if found to be invalid
  bool update_table_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
      2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
  bool update_partition_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
      2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)

  // get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if
  // such statistics exist. If the required statistics don't exist, get APIs throw NoSuchObjectException
  // For instance, if get_table_column_statistics is called on a partitioned table for which only
  // partition level column stats exist, get_table_column_statistics will throw NoSuchObjectException
  ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4)
  ColumnStatistics get_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name,
      4:string col_name) throws (1:NoSuchObjectException o1, 2:MetaException o2,
      3:InvalidInputException o3, 4:InvalidObjectException o4)
  TableStatsResult get_table_statistics_req(1:TableStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:MetaException o2)
  PartitionsStatsResult get_partitions_statistics_req(1:PartitionsStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:MetaException o2)
  AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:MetaException o2)
  bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)

  // delete APIs attempt to delete column statistics, if found, associated with a given db_name, tbl_name, [part_name]
  // and col_name. If the delete API doesn't find the statistics record in the metastore, it throws NoSuchObjectException
  // Delete API validates the input and if the input is invalid throws InvalidInputException/InvalidObjectException.
  bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws
      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
      4:InvalidInputException o4)
  bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
      4:InvalidInputException o4)

  //
  // user-defined functions
  //

  void create_function(1:Function func)
      throws (1:AlreadyExistsException o1,
              2:InvalidObjectException o2,
              3:MetaException o3,
              4:NoSuchObjectException o4)

  void drop_function(1:string dbName, 2:string funcName)
      throws (1:NoSuchObjectException o1, 2:MetaException o3)

  void alter_function(1:string dbName, 2:string funcName, 3:Function newFunc)
      throws (1:InvalidOperationException o1, 2:MetaException o2)

  list<string> get_functions(1:string dbName, 2:string pattern)
      throws (1:MetaException o1)
  Function get_function(1:string dbName, 2:string funcName)
      throws (1:MetaException o1, 2:NoSuchObjectException o2)

  GetAllFunctionsResponse get_all_functions() throws (1:MetaException o1)

  // authorization privileges

  bool create_role(1:Role role) throws(1:MetaException o1)
  bool drop_role(1:string role_name) throws(1:MetaException o1)
  list<string> get_role_names() throws(1:MetaException o1)
  // Deprecated, use grant_revoke_role()
  bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type,
      4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1)
  // Deprecated, use grant_revoke_role()
  bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type)
      throws(1:MetaException o1)
  list<Role> list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1)
  GrantRevokeRoleResponse grant_revoke_role(1:GrantRevokeRoleRequest request) throws(1:MetaException o1)

  // get all role-grants for users/roles that have been granted the given role
  // Note that in the returned list of RolePrincipalGrants, the roleName is
  // redundant as it would match the role_name argument of this function
  GetPrincipalsInRoleResponse get_principals_in_role(1: GetPrincipalsInRoleRequest request) throws(1:MetaException o1)

  // get grant information of all roles granted to the given principal
  // Note that in the returned list of RolePrincipalGrants, the principal name and type are
  // redundant as they would match the principal name and type arguments of this function
  GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(1: GetRoleGrantsForPrincipalRequest request) throws(1:MetaException o1)

  PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name,
      3: list<string> group_names) throws(1:MetaException o1)
  list<HiveObjectPrivilege> list_privileges(1:string principal_name, 2:PrincipalType principal_type,
      3: HiveObjectRef hiveObject) throws(1:MetaException o1)

  // Deprecated, use grant_revoke_privileges()
  bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
  // Deprecated, use grant_revoke_privileges()
  bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
  GrantRevokePrivilegeResponse grant_revoke_privileges(1:GrantRevokePrivilegeRequest request) throws(1:MetaException o1);

  // this is used by metastore client to send UGI information to metastore server immediately
  // after setting up a connection.
  list<string> set_ugi(1:string user_name, 2:list<string> group_names) throws (1:MetaException o1)

  // Authentication (delegation token) interfaces

  // get metastore server delegation token for use from the map/reduce tasks to authenticate
  // to metastore server
  string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name)
      throws (1:MetaException o1)

  // method to renew delegation token obtained from metastore server
  i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1)

  // method to cancel delegation token obtained from metastore server
  void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1)

  // Transaction and lock management calls
  // Get just list of open transactions
  GetOpenTxnsResponse get_open_txns()
  // Get list of open transactions with state (open, aborted)
  GetOpenTxnsInfoResponse get_open_txns_info()
  OpenTxnsResponse open_txns(1:OpenTxnRequest rqst)
  void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1)
  void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
  LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
  LockResponse check_lock(1:CheckLockRequest rqst)
      throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3)
  void unlock(1:UnlockRequest rqst) throws (1:NoSuchLockException o1, 2:TxnOpenException o2)
  ShowLocksResponse show_locks(1:ShowLocksRequest rqst)
  void heartbeat(1:HeartbeatRequest ids) throws (1:NoSuchLockException o1, 2:NoSuchTxnException o2, 3:TxnAbortedException o3)
  HeartbeatTxnRangeResponse heartbeat_txn_range(1:HeartbeatTxnRangeRequest txns)
  void compact(1:CompactionRequest rqst)
  ShowCompactResponse show_compact(1:ShowCompactRequest rqst)
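
  // Illustrative call sequence (editor's note, not part of the upstream IDL):
  // a transactional client would typically call open_txns() to obtain a txn id,
  // lock() with that txnid and the components it will touch, poll check_lock()
  // while the lock is WAITING, heartbeat() periodically to keep the txn and
  // lock alive, and finally commit_txn() on success or abort_txn() on failure.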

  // Notification logging calls
  NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst)
  CurrentNotificationEventId get_current_notificationEventId()
}

// * Note about the DDL_TIME: When creating or altering a table or a partition,
// if the DDL_TIME is not set, the current time will be used.

// For storing info about archived partitions in parameters

// Whether the partition is archived
const string IS_ARCHIVED = "is_archived",
// The original location of the partition, before archiving. After archiving,
// this directory will contain the archive. When the partition
// is dropped, this directory will be deleted
const string ORIGINAL_LOCATION = "original_location",

// Whether or not the table is considered immutable - immutable tables can only be
// overwritten or created if unpartitioned, or if partitioned, partitions inside them
// can only be overwritten or created. Immutability supports write-once and replace
// semantics, but not append.
const string IS_IMMUTABLE = "immutable",

// these should be needed only for backward compatibility with filestore
const string META_TABLE_COLUMNS = "columns",
const string META_TABLE_COLUMN_TYPES = "columns.types",
const string BUCKET_FIELD_NAME = "bucket_field_name",
const string BUCKET_COUNT = "bucket_count",
const string FIELD_TO_DIMENSION = "field_to_dimension",
const string META_TABLE_NAME = "name",
const string META_TABLE_DB = "db",
const string META_TABLE_LOCATION = "location",
const string META_TABLE_SERDE = "serde",
const string META_TABLE_PARTITION_COLUMNS = "partition_columns",
const string META_TABLE_PARTITION_COLUMN_TYPES = "partition_columns.types",
const string FILE_INPUT_FORMAT = "file.inputformat",
const string FILE_OUTPUT_FORMAT = "file.outputformat",
const string META_TABLE_STORAGE = "storage_handler",
const string TABLE_IS_TRANSACTIONAL = "transactional",
const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction",