1818
1919package org .apache .hadoop .hive .ql .ddl .table .storage .compact ;
2020
21+ import org .apache .hadoop .hive .common .ServerUtils ;
2122import org .apache .hadoop .hive .conf .HiveConf ;
2223import org .apache .hadoop .hive .metastore .HiveMetaStoreClient ;
2324
2728import org .apache .hadoop .hive .metastore .api .ShowCompactResponse ;
2829import org .apache .hadoop .hive .metastore .api .ShowCompactResponseElement ;
2930import org .apache .hadoop .hive .metastore .txn .TxnStore ;
31+ import org .apache .hadoop .hive .metastore .txn .TxnUtils ;
3032import org .apache .hadoop .hive .metastore .utils .JavaUtils ;
3133import org .apache .hadoop .hive .ql .ErrorMsg ;
3234import org .apache .hadoop .hive .ql .ddl .DDLOperation ;
3537import org .apache .hadoop .hive .ql .metadata .HiveException ;
3638import org .apache .hadoop .hive .ql .metadata .Partition ;
3739import org .apache .hadoop .hive .ql .metadata .Table ;
38- import org .apache .hadoop .hive .ql .txn .compactor .InitiatorBase ;
40+ import org .apache .hadoop .hive .ql .txn .compactor .CompactorUtil ;
3941
40- import java .util .*;
41- import java .util .concurrent .atomic .AtomicBoolean ;
42+ import java .util .List ;
43+ import java .util .ArrayList ;
44+ import java .util .Map ;
45+ import java .util .LinkedHashMap ;
46+ import java .util .Optional ;
4247
4348import static org .apache .hadoop .hive .ql .io .AcidUtils .compactionTypeStr2ThriftType ;
4449
4550/**
4651 * Operation process of compacting a table.
4752 */
4853public class AlterTableCompactOperation extends DDLOperation <AlterTableCompactDesc > {
54+
  /**
   * Creates the DDL operation backing {@code ALTER TABLE ... COMPACT}.
   *
   * @param context shared DDL operation context (conf, db, console)
   * @param desc    descriptor carrying the compaction request: type, optional partition spec,
   *                blocking flag, pool name, etc.
   */
  public AlterTableCompactOperation(DDLOperationContext context, AlterTableCompactDesc desc) {
    super(context, desc);
  }
@@ -56,6 +62,11 @@ public AlterTableCompactOperation(DDLOperationContext context, AlterTableCompact
5662 throw new HiveException (ErrorMsg .NONACID_COMPACTION_NOT_SUPPORTED , table .getDbName (), table .getTableName ());
5763 }
5864
65+ Map <String , org .apache .hadoop .hive .metastore .api .Partition > partitionMap =
66+ convertPartitionsFromThriftToDB (getPartitions (table , desc , context ));
67+
68+ TxnStore txnHandler = TxnUtils .getTxnStore (context .getConf ());
69+
5970 CompactionRequest compactionRequest = new CompactionRequest (table .getDbName (), table .getTableName (),
6071 compactionTypeStr2ThriftType (desc .getCompactionType ()));
6172
@@ -69,40 +80,48 @@ public AlterTableCompactOperation(DDLOperationContext context, AlterTableCompact
6980 compactionRequest .setNumberOfBuckets (desc .getNumberOfBuckets ());
7081 }
7182
72- InitiatorBase initiatorBase = new InitiatorBase ();
73- initiatorBase .setConf (context .getConf ());
74- initiatorBase .init (new AtomicBoolean ());
75-
76- Map <String , org .apache .hadoop .hive .metastore .api .Partition > partitionMap =
77- convertPartitionsFromThriftToDB (getPartitions (table , desc , context ));
78-
79- if (desc .getPartitionSpec () != null ){
80- Optional <String > partitionName = partitionMap .keySet ().stream ().findFirst ();
81- partitionName .ifPresent (compactionRequest ::setPartitionname );
82- }
83- List <CompactionResponse > compactionResponses =
84- initiatorBase .initiateCompactionForTable (compactionRequest , table .getTTable (), partitionMap );
85- for (CompactionResponse compactionResponse : compactionResponses ) {
86- if (!compactionResponse .isAccepted ()) {
87- String message ;
88- if (compactionResponse .isSetErrormessage ()) {
89- message = compactionResponse .getErrormessage ();
90- throw new HiveException (ErrorMsg .COMPACTION_REFUSED , table .getDbName (), table .getTableName (),
91- "CompactionId: " + compactionResponse .getId (), message );
92- }
93- context .getConsole ().printInfo (
94- "Compaction already enqueued with id " + compactionResponse .getId () + "; State is "
95- + compactionResponse .getState ());
96- continue ;
83+ //Will directly initiate compaction if an un-partitioned table/a partition is specified in the request
84+ if (desc .getPartitionSpec () != null || !table .isPartitioned ()) {
85+ if (desc .getPartitionSpec () != null ) {
86+ Optional <String > partitionName = partitionMap .keySet ().stream ().findFirst ();
87+ partitionName .ifPresent (compactionRequest ::setPartitionname );
9788 }
98- context .getConsole ().printInfo ("Compaction enqueued with id " + compactionResponse .getId ());
99- if (desc .isBlocking () && compactionResponse .isAccepted ()) {
100- waitForCompactionToFinish (compactionResponse , context );
89+ CompactionResponse compactionResponse = txnHandler .compact (compactionRequest );
90+ parseCompactionResponse (compactionResponse , table , compactionRequest .getPartitionname ());
91+ } else { // Check for eligible partitions and initiate compaction
92+ for (Map .Entry <String , org .apache .hadoop .hive .metastore .api .Partition > partitionMapEntry : partitionMap .entrySet ()) {
93+ compactionRequest .setPartitionname (partitionMapEntry .getKey ());
94+ CompactionResponse compactionResponse =
95+ CompactorUtil .initiateCompactionForPartition (table .getTTable (), partitionMapEntry .getValue (),
96+ compactionRequest , ServerUtils .hostname (), txnHandler , context .getConf ());
97+ parseCompactionResponse (compactionResponse , table , partitionMapEntry .getKey ());
10198 }
10299 }
103100 return 0 ;
104101 }
105102
103+ private void parseCompactionResponse (CompactionResponse compactionResponse , Table table , String partitionName )
104+ throws HiveException {
105+ if (compactionResponse == null ) {
106+ context .getConsole ().printInfo (
107+ "Not enough deltas to initiate compaction for table=" + table .getTableName () + "partition=" + partitionName );
108+ return ;
109+ }
110+ if (!compactionResponse .isAccepted ()) {
111+ if (compactionResponse .isSetErrormessage ()) {
112+ throw new HiveException (ErrorMsg .COMPACTION_REFUSED , table .getDbName (), table .getTableName (),
113+ partitionName == null ? "" : " partition(" + partitionName + ")" , compactionResponse .getErrormessage ());
114+ }
115+ context .getConsole ().printInfo (
116+ "Compaction already enqueued with id " + compactionResponse .getId () + "; State is " + compactionResponse .getState ());
117+ return ;
118+ }
119+ context .getConsole ().printInfo ("Compaction enqueued with id " + compactionResponse .getId ());
120+ if (desc .isBlocking () && compactionResponse .isAccepted ()) {
121+ waitForCompactionToFinish (compactionResponse , context );
122+ }
123+ }
124+
106125 private List <Partition > getPartitions (Table table , AlterTableCompactDesc desc , DDLOperationContext context )
107126 throws HiveException {
108127 List <Partition > partitions = new ArrayList <>();
@@ -117,7 +136,7 @@ private List<Partition> getPartitions(Table table, AlterTableCompactDesc desc, D
117136 partitions = context .getDb ().getPartitions (table , partitionSpec );
118137 if (partitions .size () > 1 ) {
119138 throw new HiveException (ErrorMsg .TOO_MANY_COMPACTION_PARTITIONS );
120- } else if (partitions .size () == 0 ) {
139+ } else if (partitions .isEmpty () ) {
121140 throw new HiveException (ErrorMsg .INVALID_PARTITION_SPEC );
122141 }
123142 }
0 commit comments