From f8e53e7bc70576d5e4bbf3bf2f143e5bb9002660 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 6 Nov 2020 23:05:47 -0600 Subject: [PATCH 01/22] chore: refactor BulkWriter to use retryBatch --- .../cloud/firestore/BulkCommitBatch.java | 16 - .../google/cloud/firestore/BulkWriter.java | 601 ++++++++++++------ .../cloud/firestore/BulkWriterError.java | 61 ++ .../google/cloud/firestore/UpdateBuilder.java | 107 +--- .../cloud/firestore/BulkWriterTest.java | 39 +- 5 files changed, 507 insertions(+), 317 deletions(-) create mode 100644 google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java index 8ef106fc3a..ceb22c6519 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java @@ -17,7 +17,6 @@ package com.google.cloud.firestore; import com.google.api.core.ApiFuture; -import com.google.common.base.Preconditions; /** Used to represent a batch on the BatchQueue. */ class BulkCommitBatch extends UpdateBuilder> { @@ -26,21 +25,6 @@ class BulkCommitBatch extends UpdateBuilder> { super(firestore, maxBatchSize); } - BulkCommitBatch(FirestoreImpl firestore, BulkCommitBatch retryBatch) { - super(firestore); - - // Create a new BulkCommitBatch containing only the indexes from the provided indexes to retry. 
- for (int index : retryBatch.getPendingIndexes()) { - this.writes.add(retryBatch.writes.get(index)); - } - - Preconditions.checkState( - retryBatch.state == BatchState.SENT, - "Batch should be SENT when creating a new BulkCommitBatch for retry"); - this.state = retryBatch.state; - this.pendingOperations = retryBatch.pendingOperations; - } - ApiFuture wrapResult(ApiFuture result) { return result; } diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index 8fad7dea2c..1ec33273ce 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -23,16 +23,20 @@ import com.google.api.core.SettableApiFuture; import com.google.api.gax.retrying.ExponentialRetryAlgorithm; import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.StatusCode.Code; import com.google.cloud.firestore.UpdateBuilder.BatchState; +import com.google.cloud.firestore.v1.FirestoreSettings; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.base.Predicate; -import com.google.common.collect.FluentIterable; +import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -42,9 +46,31 @@ import javax.annotation.Nullable; final class BulkWriter implements AutoCloseable { + public interface WriteResultListener { + void onResult(DocumentReference documentReference, WriteResult 
result); + }; + + public interface WriteErrorListener { + boolean shouldRetryListener(BulkWriterError error); + } + + private interface BulkWriterOperationCallback { + ApiFuture apply(BulkCommitBatch batch); + } + + enum OperationType { + CREATE, + SET, + UPDATE, + DELETE + } /** The maximum number of writes that can be in a single batch. */ public static final int MAX_BATCH_SIZE = 500; + /** + * The maximum number of retries that will be attempted by backoff before stopping all retry + * attempts. + */ public static final int MAX_RETRY_ATTEMPTS = 10; /** @@ -84,12 +110,53 @@ final class BulkWriter implements AutoCloseable { */ private final List batchQueue = new CopyOnWriteArrayList<>(); + /** + * A queue of batches to be retried. Use a synchronized list to avoid multi-thread concurrent + * modification errors (as this list is modified from both the user thread and the network + * thread). + */ + private final List retryBatchQueue = new CopyOnWriteArrayList<>(); + + /** + * A set of futures that represent pending BulkWriter operations. Each future is completed when + * the BulkWriter operation resolves. This set includes retries. Each retry's promise is added, + * attempted, and removed from this set before scheduling the next retry. + */ + private final Set> pendingOperations = new CopyOnWriteArraySet<>(); + + /** + * A list of future that represent sent batches. Each future is completed when the batch's + * response is received. This includes batches from both the batchQueue and retryBatchQueue. + */ + private final Set> pendingBatches = new CopyOnWriteArraySet<>(); + /** Whether this BulkWriter instance is closed. Once closed, it cannot be opened again. */ private boolean closed = false; /** Rate limiter used to throttle requests as per the 500/50/5 rule. 
*/ private final RateLimiter rateLimiter; + private WriteResultListener successListener = + new WriteResultListener() { + public void onResult(DocumentReference documentReference, WriteResult result) {} + }; + + private WriteErrorListener errorListener = + new WriteErrorListener() { + public boolean shouldRetryListener(BulkWriterError error) { + if (error.getFailedAttempts() == MAX_RETRY_ATTEMPTS) { + return false; + } + Set codes = FirestoreSettings.newBuilder().batchWriteSettings().getRetryableCodes(); + for (Code code : codes) { + if (code.equals(Code.valueOf(error.getStatus().getCode().name()))) { + return true; + } + } + return false; + } + }; + private final FirestoreImpl firestore; private final ScheduledExecutorService firestoreExecutor; @@ -153,12 +220,17 @@ final class BulkWriter implements AutoCloseable { */ @Nonnull public ApiFuture create( - @Nonnull DocumentReference documentReference, @Nonnull Map fields) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Map fields) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.create(documentReference, fields); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.CREATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.create(documentReference, fields); + } + }); } /** @@ -171,12 +243,16 @@ public ApiFuture create( */ @Nonnull public ApiFuture create( - @Nonnull DocumentReference documentReference, @Nonnull Object pojo) { + @Nonnull final DocumentReference documentReference, @Nonnull final Object pojo) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.create(documentReference, pojo); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.CREATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch 
batch) { + return batch.set(documentReference, pojo); + } + }); } /** @@ -187,12 +263,16 @@ public ApiFuture create( * fails. */ @Nonnull - public ApiFuture delete(@Nonnull DocumentReference documentReference) { + public ApiFuture delete(@Nonnull final DocumentReference documentReference) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.delete(documentReference); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.DELETE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.delete(documentReference); + } + }); } /** @@ -205,12 +285,17 @@ public ApiFuture delete(@Nonnull DocumentReference documentReferenc */ @Nonnull public ApiFuture delete( - @Nonnull DocumentReference documentReference, @Nonnull Precondition precondition) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Precondition precondition) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.delete(documentReference, precondition); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.SET, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.delete(documentReference, precondition); + } + }); } /** @@ -224,12 +309,17 @@ public ApiFuture delete( */ @Nonnull public ApiFuture set( - @Nonnull DocumentReference documentReference, @Nonnull Map fields) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Map fields) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.set(documentReference, fields); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.SET, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return 
batch.set(documentReference, fields); + } + }); } /** @@ -244,14 +334,18 @@ public ApiFuture set( */ @Nonnull public ApiFuture set( - @Nonnull DocumentReference documentReference, - @Nonnull Map fields, - @Nonnull SetOptions options) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Map fields, + @Nonnull final SetOptions options) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.set(documentReference, fields, options); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.SET, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.set(documentReference, fields, options); + } + }); } /** @@ -266,12 +360,18 @@ public ApiFuture set( */ @Nonnull public ApiFuture set( - @Nonnull DocumentReference documentReference, Object pojo, @Nonnull SetOptions options) { + @Nonnull final DocumentReference documentReference, + final Object pojo, + @Nonnull final SetOptions options) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.set(documentReference, pojo, options); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.SET, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.set(documentReference, pojo, options); + } + }); } /** @@ -283,12 +383,17 @@ public ApiFuture set( * @return An ApiFuture containing the result of the write. Contains an error if the write fails. 
*/ @Nonnull - public ApiFuture set(@Nonnull DocumentReference documentReference, Object pojo) { + public ApiFuture set( + @Nonnull final DocumentReference documentReference, final Object pojo) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.set(documentReference, pojo); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.SET, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.set(documentReference, pojo); + } + }); } /** @@ -306,12 +411,17 @@ public ApiFuture set(@Nonnull DocumentReference documentReference, */ @Nonnull public ApiFuture update( - @Nonnull DocumentReference documentReference, @Nonnull Map fields) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Map fields) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.update(documentReference, fields); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.UPDATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.update(documentReference, fields); + } + }); } /** @@ -330,14 +440,18 @@ public ApiFuture update( */ @Nonnull public ApiFuture update( - @Nonnull DocumentReference documentReference, - @Nonnull Map fields, - Precondition precondition) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Map fields, + final Precondition precondition) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = bulkCommitBatch.update(documentReference, fields, precondition); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.UPDATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.update(documentReference, fields, 
precondition); + } + }); } /** @@ -357,16 +471,19 @@ public ApiFuture update( */ @Nonnull public ApiFuture update( - @Nonnull DocumentReference documentReference, - @Nonnull String field, - @Nullable Object value, - Object... moreFieldsAndValues) { + @Nonnull final DocumentReference documentReference, + @Nonnull final String field, + @Nullable final Object value, + final Object... moreFieldsAndValues) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = - bulkCommitBatch.update(documentReference, field, value, moreFieldsAndValues); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.UPDATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.update(documentReference, field, value, moreFieldsAndValues); + } + }); } /** @@ -386,16 +503,19 @@ public ApiFuture update( */ @Nonnull public ApiFuture update( - @Nonnull DocumentReference documentReference, - @Nonnull FieldPath fieldPath, - @Nullable Object value, - Object... moreFieldsAndValues) { + @Nonnull final DocumentReference documentReference, + @Nonnull final FieldPath fieldPath, + @Nullable final Object value, + final Object... moreFieldsAndValues) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = - bulkCommitBatch.update(documentReference, fieldPath, value, moreFieldsAndValues); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.UPDATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.update(documentReference, fieldPath, value, moreFieldsAndValues); + } + }); } /** @@ -415,17 +535,20 @@ public ApiFuture update( */ @Nonnull public ApiFuture update( - @Nonnull DocumentReference documentReference, - @Nonnull Precondition precondition, - @Nonnull String field, - @Nullable Object value, - Object... 
moreFieldsAndValues) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Precondition precondition, + @Nonnull final String field, + @Nullable final Object value, + final Object... moreFieldsAndValues) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = - bulkCommitBatch.update(documentReference, precondition, field, value, moreFieldsAndValues); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.UPDATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.update(documentReference, precondition, field, value, moreFieldsAndValues); + } + }); } /** @@ -446,18 +569,95 @@ public ApiFuture update( */ @Nonnull public ApiFuture update( - @Nonnull DocumentReference documentReference, - @Nonnull Precondition precondition, - @Nonnull FieldPath fieldPath, - @Nullable Object value, - Object... moreFieldsAndValues) { + @Nonnull final DocumentReference documentReference, + @Nonnull final Precondition precondition, + @Nonnull final FieldPath fieldPath, + @Nullable final Object value, + final Object... 
moreFieldsAndValues) { verifyNotClosed(); - BulkCommitBatch bulkCommitBatch = getEligibleBatch(); - ApiFuture future = - bulkCommitBatch.update( - documentReference, precondition, fieldPath, value, moreFieldsAndValues); - sendReadyBatches(); - return future; + return executeWrite( + documentReference, + OperationType.UPDATE, + new BulkWriterOperationCallback() { + public ApiFuture apply(BulkCommitBatch batch) { + return batch.update( + documentReference, precondition, fieldPath, value, moreFieldsAndValues); + } + }); + } + + private ApiFuture executeWrite( + DocumentReference documentReference, + OperationType operationType, + BulkWriterOperationCallback operationCallback) { + final SettableApiFuture operationCompletedFuture = SettableApiFuture.create(); + pendingOperations.add(operationCompletedFuture); + + ApiFuture writeResultApiFuture = + continueCallback(documentReference, operationType, operationCallback, 0); + writeResultApiFuture.addListener( + new Runnable() { + public void run() { + operationCompletedFuture.set(null); + pendingOperations.remove(operationCompletedFuture); + } + }, + MoreExecutors.directExecutor()); + + return writeResultApiFuture; + } + + private ApiFuture continueCallback( + final DocumentReference documentReference, + final OperationType operationType, + final BulkWriterOperationCallback operationCallback, + final int failedAttempts) { + List operationBatchQueue = failedAttempts > 0 ? retryBatchQueue : batchQueue; + BulkCommitBatch bulkCommitBatch = getEligibleBatch(operationBatchQueue); + + // Send ready batches if this is the first attempt. Subsequent retry batches are scheduled + // after the initial batch returns. 
+ if (failedAttempts == 0) { + sendReadyBatches(operationBatchQueue); + } + + return ApiFutures.transformAsync( + ApiFutures.catchingAsync( + operationCallback.apply(bulkCommitBatch), + FirestoreException.class, + new ApiAsyncFunction() { + public ApiFuture apply(FirestoreException exception) + throws BulkWriterError { + BulkWriterError bulkWriterError = + new BulkWriterError( + exception.getStatus(), + exception.getMessage(), + documentReference, + operationType, + failedAttempts); + + boolean shouldRetry = errorListener.shouldRetryListener(bulkWriterError); + logger.log( + Level.INFO, + String.format( + "Running error callback on error code: %d, shouldRetry: %b", + exception.getStatus().getCode().value(), shouldRetry)); + if (!shouldRetry) { + throw bulkWriterError; + } else { + return continueCallback( + documentReference, operationType, operationCallback, failedAttempts + 1); + } + } + }, + MoreExecutors.directExecutor()), + new ApiAsyncFunction() { + public ApiFuture apply(WriteResult result) throws Exception { + successListener.onResult(documentReference, result); + return ApiFutures.immediateFuture(result); + } + }, + MoreExecutors.directExecutor()); } /** @@ -477,21 +677,51 @@ public ApiFuture update( @Nonnull public ApiFuture flush() { verifyNotClosed(); - final SettableApiFuture flushComplete = SettableApiFuture.create(); - List> writeFutures = new ArrayList<>(); + return performFlush(Lists.newArrayList(pendingOperations)); + } + + private ApiFuture performFlush(final List> pendingOperations) { for (BulkCommitBatch batch : batchQueue) { batch.markReadyToSend(); - writeFutures.addAll(batch.getPendingFutures()); } - sendReadyBatches(); - ApiFutures.successfulAsList(writeFutures) + + // Send all scheduled operations on the BatchQueue first. 
+ sendReadyBatches(batchQueue); + final SettableApiFuture batchQueueFlushComplete = SettableApiFuture.create(); + final SettableApiFuture flushComplete = SettableApiFuture.create(); + ApiFutures.successfulAsList(pendingBatches) .addListener( new Runnable() { public void run() { - flushComplete.set(null); + batchQueueFlushComplete.set(null); } }, MoreExecutors.directExecutor()); + + // Afterwards, send all accumulated retry operations. Wait until the retryBatchQueue is cleared. + // This way, operations scheduled after flush() will not be sent until the retries are done. + batchQueueFlushComplete.addListener( + new Runnable() { + public void run() { + if (retryBatchQueue.size() > 0) { + for (BulkCommitBatch batch : retryBatchQueue) { + batch.markReadyToSend(); + } + sendReadyBatches(retryBatchQueue); + ApiFutures.successfulAsList(pendingOperations) + .addListener( + new Runnable() { + public void run() { + flushComplete.set(null); + } + }, + MoreExecutors.directExecutor()); + } else { + flushComplete.set(null); + } + } + }, + MoreExecutors.directExecutor()); return flushComplete; } @@ -515,70 +745,72 @@ private void verifyNotClosed() { } } + public void addWriteResultListener(WriteResultListener writeResultListener) { + successListener = writeResultListener; + } + + public void addWriteErrorListener(WriteErrorListener shouldRetryListener) { + errorListener = shouldRetryListener; + } + /** * Return the first eligible batch that can hold a write to the provided reference, or creates one * if no eligible batches are found. */ - private BulkCommitBatch getEligibleBatch() { + private BulkCommitBatch getEligibleBatch(List batchQueue) { if (batchQueue.size() > 0) { BulkCommitBatch lastBatch = batchQueue.get(batchQueue.size() - 1); if (lastBatch.getState() == UpdateBuilder.BatchState.OPEN) { return lastBatch; } } - return createNewBatch(); + return createNewBatch(batchQueue); } /** * Creates a new batch and adds it to the BatchQueue. 
If there is already a batch enqueued, sends * the batch after a new one is created. */ - private BulkCommitBatch createNewBatch() { + private BulkCommitBatch createNewBatch(List batchQueue) { BulkCommitBatch newBatch = new BulkCommitBatch(firestore, maxBatchSize); if (batchQueue.size() > 0) { batchQueue.get(batchQueue.size() - 1).markReadyToSend(); - sendReadyBatches(); + sendReadyBatches(batchQueue); } batchQueue.add(newBatch); return newBatch; } /** - * Attempts to send batches starting from the front of the BatchQueue until a batch cannot be - * sent. + * Attempts to send batches starting from the front of the provided batch queue until a batch + * cannot be sent. * *

After a batch is complete, try sending batches again. */ - private void sendReadyBatches() { - List unsentBatches = - FluentIterable.from(batchQueue) - .filter( - new Predicate() { - @Override - public boolean apply(BulkCommitBatch batch) { - return batch.getState() == UpdateBuilder.BatchState.READY_TO_SEND; - } - }) - .toList(); - + private void sendReadyBatches(final List batchQueue) { int index = 0; - while (index < unsentBatches.size() - && unsentBatches.get(index).state == BatchState.READY_TO_SEND) { - final BulkCommitBatch batch = unsentBatches.get(index); + while (index < batchQueue.size() && batchQueue.get(index).state == BatchState.READY_TO_SEND) { + final BulkCommitBatch batch = batchQueue.get(index); + + // Future that completes when the current or its scheduling attempts completes. + final SettableApiFuture batchCompletedFuture = SettableApiFuture.create(); + pendingBatches.add(batchCompletedFuture); // Send the batch if it is under the rate limit, or schedule another attempt after the // appropriate timeout. long delayMs = rateLimiter.getNextRequestDelayMs(batch.getPendingOperationCount()); Preconditions.checkState(delayMs != -1, "Batch size should be under capacity"); if (delayMs == 0) { - sendBatch(batch); + sendBatch(batch, batchQueue, batchCompletedFuture); } else { firestoreExecutor.schedule( new Runnable() { @Override public void run() { - sendBatch(batch); + sendReadyBatches(batchQueue); + batchCompletedFuture.set(null); + pendingBatches.remove(batchCompletedFuture); } }, delayMs, @@ -594,7 +826,10 @@ public void run() { * Sends the provided batch and processes the results. After the batch is committed, sends the * next group of ready batches. 
*/ - private void sendBatch(final BulkCommitBatch batch) { + private void sendBatch( + final BulkCommitBatch batch, + final List batchQueue, + final SettableApiFuture batchCompletedFuture) { Preconditions.checkState( batch.state == BatchState.READY_TO_SEND, "The batch should be marked as READY_TO_SEND before committing"); @@ -602,104 +837,52 @@ private void sendBatch(final BulkCommitBatch batch) { boolean success = rateLimiter.tryMakeRequest(batch.getPendingOperationCount()); Preconditions.checkState(success, "Batch should be under rate limit to be sent."); - ApiFuture commitFuture = bulkCommit(batch); + ApiFuture commitFuture = newBulkCommit(batch); commitFuture.addListener( new Runnable() { public void run() { boolean removed = batchQueue.remove(batch); Preconditions.checkState( removed, "The batch should be in the BatchQueue." + batchQueue.size()); - sendReadyBatches(); - } - }, - MoreExecutors.directExecutor()); - } - private ApiFuture bulkCommit(BulkCommitBatch batch) { - return bulkCommit(batch, 0); - } + if (batchQueue.equals(retryBatchQueue)) { + for (BulkCommitBatch batch : retryBatchQueue) { + batch.markReadyToSend(); + } + } - private ApiFuture bulkCommit(final BulkCommitBatch batch, final int attempt) { - final SettableApiFuture backoffFuture = SettableApiFuture.create(); + batchCompletedFuture.set(null); + pendingBatches.remove(batchCompletedFuture); - // Add a backoff delay. At first, this is 0. 
- firestoreExecutor.schedule( - new Runnable() { - @Override - public void run() { - backoffFuture.set(null); + sendReadyBatches(batchQueue); } }, - nextAttempt.getRandomizedRetryDelay().toMillis(), - TimeUnit.MILLISECONDS); - - return ApiFutures.transformAsync( - backoffFuture, new BackoffCallback(batch, attempt), firestoreExecutor); + MoreExecutors.directExecutor()); } - private class BackoffCallback implements ApiAsyncFunction { - final BulkCommitBatch batch; - final int attempt; - - public BackoffCallback(BulkCommitBatch batch, int attempt) { - this.batch = batch; - this.attempt = attempt; - } - - @Override - public ApiFuture apply(Void ignored) { - - return ApiFutures.transformAsync( - ApiFutures.catchingAsync( - batch.bulkCommit(), - Exception.class, - new ApiAsyncFunction>() { - public ApiFuture> apply(Exception exception) { - List results = new ArrayList<>(); - // If the BatchWrite RPC fails, map the exception to each individual result. - for (int i = 0; i < batch.getPendingDocumentPaths().size(); ++i) { - results.add(new BatchWriteResult(null, exception)); - } - return ApiFutures.immediateFuture(results); + private ApiFuture newBulkCommit(final BulkCommitBatch batch) { + return ApiFutures.transformAsync( + ApiFutures.catchingAsync( + batch.bulkCommit(), + Exception.class, + new ApiAsyncFunction>() { + public ApiFuture> apply(Exception exception) { + List results = new ArrayList<>(); + // If the BatchWrite RPC fails, map the exception to each individual result. 
+ for (int i = 0; i < batch.newPendingOperations.size(); ++i) { + results.add(new BatchWriteResult(null, exception)); } - }, - MoreExecutors.directExecutor()), - new ProcessBulkCommitCallback(batch, attempt), - MoreExecutors.directExecutor()); - } - } - - private class ProcessBulkCommitCallback - implements ApiAsyncFunction, Void> { - final BulkCommitBatch batch; - final int attempt; - - public ProcessBulkCommitCallback(BulkCommitBatch batch, int attempt) { - this.batch = batch; - this.attempt = attempt; - } - - @Override - public ApiFuture apply(List results) { - batch.processResults(results, /* allowRetry= */ true); - List remainingOps = batch.getPendingDocumentPaths(); - if (!remainingOps.isEmpty()) { - logger.log( - Level.WARNING, - String.format( - "Current batch failed at retry #%d. Num failures: %d", - attempt, remainingOps.size())); - - if (attempt < MAX_RETRY_ATTEMPTS) { - nextAttempt = backoff.createNextAttempt(nextAttempt); - BulkCommitBatch newBatch = new BulkCommitBatch(firestore, batch); - return bulkCommit(newBatch, attempt + 1); - } else { - batch.processResults(results, /* allowRetry= */ false); - } - } - return ApiFutures.immediateFuture(null); - } + return ApiFutures.immediateFuture(results); + } + }, + MoreExecutors.directExecutor()), + new ApiAsyncFunction, Void>() { + public ApiFuture apply(List results) throws Exception { + batch.newProcessResults(results); + return ApiFutures.immediateFuture(null); + } + }, + MoreExecutors.directExecutor()); } @VisibleForTesting diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java new file mode 100644 index 0000000000..c9cef660ef --- /dev/null +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java @@ -0,0 +1,61 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.firestore; + +import com.google.cloud.firestore.BulkWriter.OperationType; +import io.grpc.Status; + +public class BulkWriterError extends Exception { + public DocumentReference getDocumentReference() { + return documentReference; + } + + public int getFailedAttempts() { + return failedAttempts; + } + + public String getMessage() { + return message; + } + + public OperationType getOperationType() { + return operationType; + } + + public Status getStatus() { + return status; + } + + private final Status status; + private final String message; + private final DocumentReference documentReference; + private final OperationType operationType; + private final int failedAttempts; + + public BulkWriterError( + Status status, + String message, + DocumentReference reference, + OperationType type, + int attempts) { + this.status = status; + this.message = message; + documentReference = reference; + operationType = type; + failedAttempts = attempts; + } +} diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java index fc9e04902b..26d3abc73f 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java @@ -16,18 +16,15 @@ package com.google.cloud.firestore; +import com.google.api.core.ApiAsyncFunction; import com.google.api.core.ApiFunction; 
import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; import com.google.api.core.InternalExtensionOnly; import com.google.api.core.SettableApiFuture; -import com.google.api.gax.rpc.StatusCode.Code; import com.google.cloud.Timestamp; import com.google.cloud.firestore.UserDataConverter.EncodingOptions; -import com.google.cloud.firestore.v1.FirestoreSettings; -import com.google.common.base.Function; import com.google.common.base.Preconditions; -import com.google.common.collect.FluentIterable; import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.MoreExecutors; import com.google.firestore.v1.BatchWriteRequest; @@ -44,7 +41,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import javax.annotation.Nonnull; @@ -90,7 +86,7 @@ static class PendingOperation { private final int maxBatchSize; protected BatchState state = BatchState.OPEN; - protected List pendingOperations = new ArrayList<>(); + protected List> newPendingOperations = new ArrayList<>(); /** * Used to represent the state of batch. @@ -196,7 +192,7 @@ private T performCreate( writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processOperation(documentReference)); + return wrapResult(processLastOperation()); } private void verifyNotCommitted() { @@ -326,7 +322,7 @@ private T performSet( writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processOperation(documentReference)); + return wrapResult(processLastOperation()); } /** Removes all values in 'fields' that are not specified in 'fieldMask'. 
*/ @@ -595,7 +591,7 @@ public boolean allowTransform() { } writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processOperation(documentReference)); + return wrapResult(processLastOperation()); } /** @@ -633,7 +629,7 @@ private T performDelete( } writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processOperation(documentReference)); + return wrapResult(processLastOperation()); } /** Commit the current batch. */ @@ -748,96 +744,47 @@ BatchState getState() { return state; } - List getPendingDocumentPaths() { - return FluentIterable.from(pendingOperations) - .transform( - new Function() { - @Override - public String apply(PendingOperation input) { - return input.documentKey; - } - }) - .toList(); - } - - List getPendingIndexes() { - return FluentIterable.from(pendingOperations) - .transform( - new Function() { - @Override - public Integer apply(PendingOperation input) { - return input.writeBatchIndex; - } - }) - .toList(); - } - - List> getPendingFutures() { - return FluentIterable.from(pendingOperations) - .transform( - new Function>() { - @Override - public SettableApiFuture apply(PendingOperation input) { - return input.future; - } - }) - .toList(); - } - int getPendingOperationCount() { - return pendingOperations.size(); + return newPendingOperations.size(); } - private ApiFuture processOperation(DocumentReference documentReference) { + ApiFuture processLastOperation() { Preconditions.checkState(state == BatchState.OPEN, "Batch should be OPEN when adding writes"); - SettableApiFuture result = SettableApiFuture.create(); - int nextBatchIndex = getPendingOperationCount(); - pendingOperations.add( - new PendingOperation(nextBatchIndex, documentReference.getPath(), result)); + SettableApiFuture resultFuture = SettableApiFuture.create(); + newPendingOperations.add(resultFuture); if (getPendingOperationCount() == maxBatchSize) { state = BatchState.READY_TO_SEND; } - return result; + return ApiFutures.transformAsync( 
+ resultFuture, + new ApiAsyncFunction() { + public ApiFuture apply(BatchWriteResult batchWriteResult) throws Exception { + if (batchWriteResult.getException() == null) { + return ApiFutures.immediateFuture(new WriteResult(batchWriteResult.getWriteTime())); + } else { + throw batchWriteResult.getException(); + } + } + }, + MoreExecutors.directExecutor()); } /** * Resolves the individual operations in the batch with the results and removes the entry from the * pendingOperations map if the result is not retryable. */ - void processResults(List results, boolean allowRetry) { - List newPendingOperations = new ArrayList<>(); - for (int i = 0; i < results.size(); ++i) { + void newProcessResults(List results) { + for (int i = 0; i < results.size(); i++) { + SettableApiFuture resultFuture = newPendingOperations.get(i); BatchWriteResult result = results.get(i); - PendingOperation operation = pendingOperations.get(i); - if (result.getException() == null) { - operation.future.set(new WriteResult(result.getWriteTime())); - } else if (!allowRetry || !shouldRetry(result.getException())) { - operation.future.setException(result.getException()); + resultFuture.set(result); } else { - // Retry the operation if it has not been processed. Store the current index of - // pendingOperations to preserve the mapping of this operation's index in the underlying - // writes array. 
- newPendingOperations.add(new PendingOperation(i, operation.documentKey, operation.future)); - } - } - pendingOperations = newPendingOperations; - } - - private boolean shouldRetry(Exception exception) { - if (!(exception instanceof FirestoreException)) { - return false; - } - Set codes = FirestoreSettings.newBuilder().batchWriteSettings().getRetryableCodes(); - Status status = ((FirestoreException) exception).getStatus(); - for (Code code : codes) { - if (code.equals(Code.valueOf(status.getCode().name()))) { - return true; + resultFuture.setException(result.getException()); } } - return false; } void markReadyToSend() { diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 529f3b4b4a..5ae49924be 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -49,9 +49,7 @@ import javax.annotation.Nonnull; import org.junit.Assert; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.Timeout; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; @@ -65,7 +63,7 @@ @RunWith(MockitoJUnitRunner.class) public class BulkWriterTest { - @Rule public Timeout timeout = new Timeout(500, TimeUnit.MILLISECONDS); + // @Rule public Timeout timeout = new Timeout(500, TimeUnit.MILLISECONDS); @Spy private final FirestoreRpc firestoreRpc = Mockito.mock(FirestoreRpc.class); @@ -221,8 +219,8 @@ public void surfacesErrors() throws Exception { result.get(); fail("set() should have failed"); } catch (Exception e) { - assertTrue(e.getCause() instanceof FirestoreException); - assertEquals(Status.DEADLINE_EXCEEDED, ((FirestoreException) e.getCause()).getStatus()); + assertTrue(e.getCause() instanceof BulkWriterError); + 
assertEquals(Status.DEADLINE_EXCEEDED, ((BulkWriterError) e.getCause()).getStatus()); } } @@ -392,7 +390,7 @@ public void retriesIndividualWritesThatFailWithAbortedOrUnavailable() throws Exc put( batchWrite( set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1"), - set(LocalFirestoreHelper.UPDATED_FIELD_PROTO, "coll/doc1"), + set(LocalFirestoreHelper.UPDATED_FIELD_PROTO, "coll/doc2"), set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc3")), mergeResponses( failedResponse(), @@ -400,7 +398,7 @@ public void retriesIndividualWritesThatFailWithAbortedOrUnavailable() throws Exc failedResponse(Code.ABORTED_VALUE))); put( batchWrite( - set(LocalFirestoreHelper.UPDATED_FIELD_PROTO, "coll/doc1"), + set(LocalFirestoreHelper.UPDATED_FIELD_PROTO, "coll/doc2"), set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc3")), mergeResponses(successResponse(2), failedResponse(Code.ABORTED_VALUE))); put( @@ -413,7 +411,7 @@ public void retriesIndividualWritesThatFailWithAbortedOrUnavailable() throws Exc // Test writes to the same document in order to verify that retry logic unaffected by document // key. 
ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); - ApiFuture result2 = bulkWriter.set(doc1, LocalFirestoreHelper.UPDATED_FIELD_MAP); + ApiFuture result2 = bulkWriter.set(doc2, LocalFirestoreHelper.UPDATED_FIELD_MAP); ApiFuture result3 = bulkWriter.set(firestoreMock.document("coll/doc3"), LocalFirestoreHelper.SINGLE_FIELD_MAP); bulkWriter.close(); @@ -422,8 +420,8 @@ public void retriesIndividualWritesThatFailWithAbortedOrUnavailable() throws Exc result1.get(); fail("set() should have failed"); } catch (Exception e) { - assertTrue(e.getCause() instanceof FirestoreException); - assertEquals(Status.DEADLINE_EXCEEDED, ((FirestoreException) e.getCause()).getStatus()); + assertTrue(e.getCause() instanceof BulkWriterError); + assertEquals(Status.DEADLINE_EXCEEDED, ((BulkWriterError) e.getCause()).getStatus()); } assertEquals(Timestamp.ofTimeSecondsAndNanos(2, 0), result2.get().getUpdateTime()); assertEquals(Timestamp.ofTimeSecondsAndNanos(3, 0), result3.get().getUpdateTime()); @@ -513,8 +511,9 @@ public void flushCompletesWhenAllWritesComplete() throws Exception { } @Test - public void doesNotSendBatchesIfDoingSoExceedsRateLimit() { + public void doesNotSendBatchesIfDoingSoExceedsRateLimit() throws Exception { final boolean[] timeoutCalled = {false}; + final ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(1) { @Override @@ -527,11 +526,27 @@ public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) } }; doReturn(timeoutExecutor).when(firestoreRpc).getExecutor(); + + // Stub responses for the BulkWriter batches. 
+ ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite( + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc") ), + mergeResponses(successResponse(1), successResponse(2), successResponse(3), successResponse(4), successResponse(5))); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); BulkWriter bulkWriter = firestoreMock.bulkWriter(BulkWriterOptions.builder().setInitialOpsPerSecond(5).build()); for (int i = 0; i < 600; ++i) { - bulkWriter.set(firestoreMock.document("coll/doc" + i), LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.set(firestoreMock.document("coll/doc"), LocalFirestoreHelper.SINGLE_FIELD_MAP); } bulkWriter.flush(); From f4d53b40f1f6c1fc032901cea64a4a2ba8c622d7 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 6 Nov 2020 23:06:16 -0600 Subject: [PATCH 02/22] fix: create new batch for writes to the same doc --- .../google/cloud/firestore/BulkCommitBatch.java | 2 ++ .../com/google/cloud/firestore/BulkWriter.java | 10 ++++++---- .../google/cloud/firestore/UpdateBuilder.java | 17 ++++++++++++----- .../google/cloud/firestore/BulkWriterTest.java | 17 ++++++++--------- 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java index ceb22c6519..0d14612bfd 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java @@ -17,6 +17,8 @@ package com.google.cloud.firestore; import com.google.api.core.ApiFuture; +import java.util.Set; +import 
java.util.concurrent.CopyOnWriteArraySet; /** Used to represent a batch on the BatchQueue. */ class BulkCommitBatch extends UpdateBuilder> { diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index 1ec33273ce..dd5753c593 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -31,7 +31,6 @@ import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -613,7 +612,7 @@ private ApiFuture continueCallback( final BulkWriterOperationCallback operationCallback, final int failedAttempts) { List operationBatchQueue = failedAttempts > 0 ? retryBatchQueue : batchQueue; - BulkCommitBatch bulkCommitBatch = getEligibleBatch(operationBatchQueue); + BulkCommitBatch bulkCommitBatch = getEligibleBatch(documentReference, operationBatchQueue); // Send ready batches if this is the first attempt. Subsequent retry batches are scheduled // after the initial batch returns. @@ -757,10 +756,13 @@ public void addWriteErrorListener(WriteErrorListener shouldRetryListener) { * Return the first eligible batch that can hold a write to the provided reference, or creates one * if no eligible batches are found. 
*/ - private BulkCommitBatch getEligibleBatch(List batchQueue) { + private BulkCommitBatch getEligibleBatch( + DocumentReference documentReference, + List batchQueue) { if (batchQueue.size() > 0) { BulkCommitBatch lastBatch = batchQueue.get(batchQueue.size() - 1); - if (lastBatch.getState() == UpdateBuilder.BatchState.OPEN) { + if (lastBatch.getState() == UpdateBuilder.BatchState.OPEN && + !lastBatch.documentPaths.contains(documentReference)) { return lastBatch; } } diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java index 26d3abc73f..144b108004 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java @@ -41,8 +41,10 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; +import java.util.concurrent.CopyOnWriteArraySet; import javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -88,6 +90,9 @@ static class PendingOperation { protected BatchState state = BatchState.OPEN; protected List> newPendingOperations = new ArrayList<>(); + Set documentPaths = new CopyOnWriteArraySet<>(); + + /** * Used to represent the state of batch. * @@ -192,7 +197,7 @@ private T performCreate( writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation()); + return wrapResult(processLastOperation(documentReference)); } private void verifyNotCommitted() { @@ -322,7 +327,7 @@ private T performSet( writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation()); + return wrapResult(processLastOperation(documentReference)); } /** Removes all values in 'fields' that are not specified in 'fieldMask'. 
*/ @@ -591,7 +596,7 @@ public boolean allowTransform() { } writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation()); + return wrapResult(processLastOperation(documentReference)); } /** @@ -629,7 +634,7 @@ private T performDelete( } writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation()); + return wrapResult(processLastOperation(documentReference)); } /** Commit the current batch. */ @@ -748,7 +753,9 @@ int getPendingOperationCount() { return newPendingOperations.size(); } - ApiFuture processLastOperation() { + ApiFuture processLastOperation(DocumentReference documentReference) { + Preconditions.checkState(!documentPaths.contains(documentReference), "Batch should not contain writes to the same document"); + documentPaths.add(documentReference); Preconditions.checkState(state == BatchState.OPEN, "Batch should be OPEN when adding writes"); SettableApiFuture resultFuture = SettableApiFuture.create(); newPendingOperations.add(resultFuture); diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 5ae49924be..ce43dfd602 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -302,15 +302,18 @@ public void cannotCallMethodsAfterClose() throws Exception { } @Test - public void canSendWritesToSameDocInSameBatch() throws Exception { + public void sendsWritesToSameDocInDifferentBatches() throws Exception { ResponseStubber responseStubber = new ResponseStubber() { { put( batchWrite( - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1"), + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + successResponse(1)); + put( + batchWrite( update(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), - 
mergeResponses(successResponse(1), successResponse(2))); + successResponse(2)); } }; responseStubber.initializeStub(batchWriteCapture, firestoreMock); @@ -533,12 +536,8 @@ public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { put( batchWrite( - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc"), - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc") ), - mergeResponses(successResponse(1), successResponse(2), successResponse(3), successResponse(4), successResponse(5))); + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc")), + successResponse(5)); } }; responseStubber.initializeStub(batchWriteCapture, firestoreMock); From 9673dfaa8f4b49286f13e153f82b3bc110b78c7e Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 9 Nov 2020 11:41:15 -0600 Subject: [PATCH 03/22] refactor BulkWriter's logic in UpdateBuilder into BulkCommitBatch --- .../cloud/firestore/BulkCommitBatch.java | 154 ++++++++++++++++- .../google/cloud/firestore/BulkWriter.java | 17 +- .../google/cloud/firestore/Transaction.java | 2 +- .../google/cloud/firestore/UpdateBuilder.java | 160 +----------------- .../google/cloud/firestore/WriteBatch.java | 2 +- .../cloud/firestore/BulkWriterTest.java | 11 +- .../cloud/firestore/WriteBatchTest.java | 41 ----- 7 files changed, 168 insertions(+), 219 deletions(-) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java index 0d14612bfd..15014f9c9e 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java @@ -16,18 +16,164 @@ package com.google.cloud.firestore; +import 
com.google.api.core.ApiAsyncFunction; +import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.firestore.v1.BatchWriteRequest; +import com.google.firestore.v1.BatchWriteResponse; +import io.grpc.Status; +import io.opencensus.trace.AttributeValue; +import io.opencensus.trace.Tracing; +import java.util.ArrayList; +import java.util.List; +import javax.annotation.Nullable; /** Used to represent a batch on the BatchQueue. */ class BulkCommitBatch extends UpdateBuilder> { + /** + * Used to represent the state of batch. + * + *

Writes can only be added while the batch is OPEN. For a batch to be sent, the batch must be + * READY_TO_SEND. After a batch is sent, it is marked as SENT. + */ + enum BatchState { + OPEN, + READY_TO_SEND, + SENT, + } + + protected BatchState state = BatchState.OPEN; BulkCommitBatch(FirestoreImpl firestore, int maxBatchSize) { super(firestore, maxBatchSize); } - ApiFuture wrapResult(ApiFuture result) { - return result; + ApiFuture wrapResult(DocumentReference documentReference) { + return processLastOperation(documentReference); + } + + /** + * Commits all pending operations to the database and verifies all preconditions. + * + *

The writes in the batch are not applied atomically and can be applied out of order. + */ + ApiFuture> bulkCommit() { + Tracing.getTracer() + .getCurrentSpan() + .addAnnotation( + TraceUtil.SPAN_NAME_BATCHWRITE, + ImmutableMap.of("numDocuments", AttributeValue.longAttributeValue(writes.size()))); + + Preconditions.checkState( + isReadyToSend(), "The batch should be marked as READY_TO_SEND before committing"); + state = BatchState.SENT; + + final BatchWriteRequest.Builder request = BatchWriteRequest.newBuilder(); + request.setDatabase(firestore.getDatabaseName()); + + for (WriteOperation writeOperation : writes) { + request.addWrites(writeOperation.write); + } + + ApiFuture response = + firestore.sendRequest(request.build(), firestore.getClient().batchWriteCallable()); + + return ApiFutures.transform( + response, + new ApiFunction>() { + @Override + public List apply(BatchWriteResponse batchWriteResponse) { + List writeResults = + batchWriteResponse.getWriteResultsList(); + + List statuses = batchWriteResponse.getStatusList(); + + List result = new ArrayList<>(); + + for (int i = 0; i < writeResults.size(); ++i) { + com.google.firestore.v1.WriteResult writeResult = writeResults.get(i); + com.google.rpc.Status status = statuses.get(i); + Status code = Status.fromCodeValue(status.getCode()); + @Nullable Timestamp updateTime = null; + @Nullable Exception exception = null; + if (code == Status.OK) { + updateTime = Timestamp.fromProto(writeResult.getUpdateTime()); + } else { + exception = FirestoreException.serverRejected(code, status.getMessage()); + } + result.add(new BatchWriteResult(updateTime, exception)); + } + + return result; + } + }, + MoreExecutors.directExecutor()); + } + + int getPendingOperationCount() { + return newPendingOperations.size(); + } + + ApiFuture processLastOperation(DocumentReference documentReference) { + Preconditions.checkState( + !documentPaths.contains(documentReference), + "Batch should not contain writes to the same document"); + 
documentPaths.add(documentReference); + Preconditions.checkState(state == BatchState.OPEN, "Batch should be OPEN when adding writes"); + SettableApiFuture resultFuture = SettableApiFuture.create(); + newPendingOperations.add(resultFuture); + + if (getPendingOperationCount() == maxBatchSize) { + state = BatchState.READY_TO_SEND; + } + + return ApiFutures.transformAsync( + resultFuture, + new ApiAsyncFunction() { + public ApiFuture apply(BatchWriteResult batchWriteResult) throws Exception { + if (batchWriteResult.getException() == null) { + return ApiFutures.immediateFuture(new WriteResult(batchWriteResult.getWriteTime())); + } else { + throw batchWriteResult.getException(); + } + } + }, + MoreExecutors.directExecutor()); + } + + /** + * Resolves the individual operations in the batch with the results and removes the entry from the + * pendingOperations map if the result is not retryable. + */ + void newProcessResults(List results) { + for (int i = 0; i < results.size(); i++) { + SettableApiFuture resultFuture = newPendingOperations.get(i); + BatchWriteResult result = results.get(i); + if (result.getException() == null) { + resultFuture.set(result); + } else { + resultFuture.setException(result.getException()); + } + } + } + + void markReadyToSend() { + if (state == BatchState.OPEN) { + state = BatchState.READY_TO_SEND; + } + } + + boolean isOpen() { + return state == BatchState.OPEN; + } + + boolean isReadyToSend() { + return state == BatchState.READY_TO_SEND; } } diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index dd5753c593..753d63905a 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -24,7 +24,6 @@ import com.google.api.gax.retrying.ExponentialRetryAlgorithm; import 
com.google.api.gax.retrying.TimedAttemptSettings; import com.google.api.gax.rpc.StatusCode.Code; -import com.google.cloud.firestore.UpdateBuilder.BatchState; import com.google.cloud.firestore.v1.FirestoreSettings; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -757,12 +756,10 @@ public void addWriteErrorListener(WriteErrorListener shouldRetryListener) { * if no eligible batches are found. */ private BulkCommitBatch getEligibleBatch( - DocumentReference documentReference, - List batchQueue) { + DocumentReference documentReference, List batchQueue) { if (batchQueue.size() > 0) { BulkCommitBatch lastBatch = batchQueue.get(batchQueue.size() - 1); - if (lastBatch.getState() == UpdateBuilder.BatchState.OPEN && - !lastBatch.documentPaths.contains(documentReference)) { + if (lastBatch.isOpen() && !lastBatch.documentPaths.contains(documentReference)) { return lastBatch; } } @@ -792,7 +789,7 @@ private BulkCommitBatch createNewBatch(List batchQueue) { */ private void sendReadyBatches(final List batchQueue) { int index = 0; - while (index < batchQueue.size() && batchQueue.get(index).state == BatchState.READY_TO_SEND) { + while (index < batchQueue.size() && batchQueue.get(index).isReadyToSend()) { final BulkCommitBatch batch = batchQueue.get(index); // Future that completes when the current or its scheduling attempts completes. 
@@ -832,14 +829,10 @@ private void sendBatch( final BulkCommitBatch batch, final List batchQueue, final SettableApiFuture batchCompletedFuture) { - Preconditions.checkState( - batch.state == BatchState.READY_TO_SEND, - "The batch should be marked as READY_TO_SEND before committing"); - batch.state = BatchState.SENT; boolean success = rateLimiter.tryMakeRequest(batch.getPendingOperationCount()); Preconditions.checkState(success, "Batch should be under rate limit to be sent."); - ApiFuture commitFuture = newBulkCommit(batch); + ApiFuture commitFuture = bulkCommit(batch); commitFuture.addListener( new Runnable() { public void run() { @@ -862,7 +855,7 @@ public void run() { MoreExecutors.directExecutor()); } - private ApiFuture newBulkCommit(final BulkCommitBatch batch) { + private ApiFuture bulkCommit(final BulkCommitBatch batch) { return ApiFutures.transformAsync( ApiFutures.catchingAsync( batch.bulkCommit(), diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/Transaction.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/Transaction.java index 26eec365ec..a07146ed63 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/Transaction.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/Transaction.java @@ -80,7 +80,7 @@ public boolean hasTransactionId() { return transactionId != null; } - Transaction wrapResult(ApiFuture result) { + Transaction wrapResult(DocumentReference documentReference) { return this; } diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java index 144b108004..39c7070314 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java @@ -16,24 +16,19 @@ package com.google.cloud.firestore; -import 
com.google.api.core.ApiAsyncFunction; import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; import com.google.api.core.InternalExtensionOnly; import com.google.api.core.SettableApiFuture; -import com.google.cloud.Timestamp; import com.google.cloud.firestore.UserDataConverter.EncodingOptions; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.MoreExecutors; -import com.google.firestore.v1.BatchWriteRequest; -import com.google.firestore.v1.BatchWriteResponse; import com.google.firestore.v1.CommitRequest; import com.google.firestore.v1.CommitResponse; import com.google.firestore.v1.Write; import com.google.protobuf.ByteString; -import io.grpc.Status; import io.opencensus.trace.AttributeValue; import io.opencensus.trace.Tracing; import java.util.ArrayList; @@ -64,47 +59,15 @@ static class WriteOperation { } } - /** - * Used to represent a pending write operation. - * - *

Contains a pending write's index, document path, and the corresponding result. - */ - static class PendingOperation { - int writeBatchIndex; - String documentKey; - SettableApiFuture future; - - PendingOperation( - int writeBatchIndex, String documentKey, SettableApiFuture future) { - this.writeBatchIndex = writeBatchIndex; - this.documentKey = documentKey; - this.future = future; - } - } - final FirestoreImpl firestore; protected final List writes; + protected final int maxBatchSize; private boolean committed; - private final int maxBatchSize; - protected BatchState state = BatchState.OPEN; protected List> newPendingOperations = new ArrayList<>(); Set documentPaths = new CopyOnWriteArraySet<>(); - - /** - * Used to represent the state of batch. - * - *

Writes can only be added while the batch is OPEN. For a batch to be sent, the batch must be - * READY_TO_SEND. After a batch is sent, it is marked as SENT. - */ - enum BatchState { - OPEN, - READY_TO_SEND, - SENT, - } - UpdateBuilder(FirestoreImpl firestore) { this(firestore, BulkWriter.MAX_BATCH_SIZE); } @@ -119,10 +82,10 @@ enum BatchState { * Wraps the result of the write operation before it is returned. * *

This method is used to generate the return value for all public methods. It allows - * operations on Transaction and Writebatch to return the object for chaining, while also allowing + * operations on Transaction and WriteBatch to return the object for chaining, while also allowing * BulkWriter operations to return the future directly. */ - abstract T wrapResult(ApiFuture result); + abstract T wrapResult(DocumentReference documentReference); /** * Turns a field map that contains field paths into a nested map. Turns {a.b : c} into {a : {b : @@ -197,7 +160,7 @@ private T performCreate( writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation(documentReference)); + return wrapResult(documentReference); } private void verifyNotCommitted() { @@ -327,7 +290,7 @@ private T performSet( writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation(documentReference)); + return wrapResult(documentReference); } /** Removes all values in 'fields' that are not specified in 'fieldMask'. */ @@ -596,7 +559,7 @@ public boolean allowTransform() { } writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation(documentReference)); + return wrapResult(documentReference); } /** @@ -634,7 +597,7 @@ private T performDelete( } writes.add(new WriteOperation(documentReference, write)); - return wrapResult(processLastOperation(documentReference)); + return wrapResult(documentReference); } /** Commit the current batch. */ @@ -681,60 +644,6 @@ public List apply(CommitResponse commitResponse) { MoreExecutors.directExecutor()); } - /** - * Commits all pending operations to the database and verifies all preconditions. - * - *

The writes in the batch are not applied atomically and can be applied out of order. - */ - ApiFuture> bulkCommit() { - Tracing.getTracer() - .getCurrentSpan() - .addAnnotation( - TraceUtil.SPAN_NAME_BATCHWRITE, - ImmutableMap.of("numDocuments", AttributeValue.longAttributeValue(writes.size()))); - - final BatchWriteRequest.Builder request = BatchWriteRequest.newBuilder(); - request.setDatabase(firestore.getDatabaseName()); - - for (WriteOperation writeOperation : writes) { - request.addWrites(writeOperation.write); - } - - ApiFuture response = - firestore.sendRequest(request.build(), firestore.getClient().batchWriteCallable()); - - return ApiFutures.transform( - response, - new ApiFunction>() { - @Override - public List apply(BatchWriteResponse batchWriteResponse) { - List writeResults = - batchWriteResponse.getWriteResultsList(); - - List statuses = batchWriteResponse.getStatusList(); - - List result = new ArrayList<>(); - - for (int i = 0; i < writeResults.size(); ++i) { - com.google.firestore.v1.WriteResult writeResult = writeResults.get(i); - com.google.rpc.Status status = statuses.get(i); - Status code = Status.fromCodeValue(status.getCode()); - @Nullable Timestamp updateTime = null; - @Nullable Exception exception = null; - if (code == Status.OK) { - updateTime = Timestamp.fromProto(writeResult.getUpdateTime()); - } else { - exception = FirestoreException.serverRejected(code, status.getMessage()); - } - result.add(new BatchWriteResult(updateTime, exception)); - } - - return result; - } - }, - MoreExecutors.directExecutor()); - } - /** Checks whether any updates have been queued. 
*/ boolean isEmpty() { return writes.isEmpty(); @@ -744,59 +653,4 @@ boolean isEmpty() { public int getMutationsSize() { return writes.size(); } - - BatchState getState() { - return state; - } - - int getPendingOperationCount() { - return newPendingOperations.size(); - } - - ApiFuture processLastOperation(DocumentReference documentReference) { - Preconditions.checkState(!documentPaths.contains(documentReference), "Batch should not contain writes to the same document"); - documentPaths.add(documentReference); - Preconditions.checkState(state == BatchState.OPEN, "Batch should be OPEN when adding writes"); - SettableApiFuture resultFuture = SettableApiFuture.create(); - newPendingOperations.add(resultFuture); - - if (getPendingOperationCount() == maxBatchSize) { - state = BatchState.READY_TO_SEND; - } - - return ApiFutures.transformAsync( - resultFuture, - new ApiAsyncFunction() { - public ApiFuture apply(BatchWriteResult batchWriteResult) throws Exception { - if (batchWriteResult.getException() == null) { - return ApiFutures.immediateFuture(new WriteResult(batchWriteResult.getWriteTime())); - } else { - throw batchWriteResult.getException(); - } - } - }, - MoreExecutors.directExecutor()); - } - - /** - * Resolves the individual operations in the batch with the results and removes the entry from the - * pendingOperations map if the result is not retryable. 
- */ - void newProcessResults(List results) { - for (int i = 0; i < results.size(); i++) { - SettableApiFuture resultFuture = newPendingOperations.get(i); - BatchWriteResult result = results.get(i); - if (result.getException() == null) { - resultFuture.set(result); - } else { - resultFuture.setException(result.getException()); - } - } - } - - void markReadyToSend() { - if (state == BatchState.OPEN) { - state = BatchState.READY_TO_SEND; - } - } } diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/WriteBatch.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/WriteBatch.java index ba69ab4209..baedefaa09 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/WriteBatch.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/WriteBatch.java @@ -41,7 +41,7 @@ public ApiFuture> commit() { return super.commit(null); } - WriteBatch wrapResult(ApiFuture result) { + WriteBatch wrapResult(DocumentReference documentReference) { return this; } } diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index ce43dfd602..2d877dafaf 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -307,12 +307,10 @@ public void sendsWritesToSameDocInDifferentBatches() throws Exception { new ResponseStubber() { { put( - batchWrite( - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), successResponse(1)); put( - batchWrite( - update(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + batchWrite(update(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), successResponse(2)); } }; @@ -535,9 +533,8 @@ public ScheduledFuture schedule(Runnable command, long delay, 
TimeUnit unit) new ResponseStubber() { { put( - batchWrite( - set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc")), - successResponse(5)); + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc")), + successResponse(5)); } }; responseStubber.initializeStub(batchWriteCapture, firestoreMock); diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java index ecf2930c34..7dace68293 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java @@ -18,7 +18,6 @@ import static com.google.cloud.firestore.LocalFirestoreHelper.UPDATED_SINGLE_FIELD_PROTO; import static com.google.cloud.firestore.LocalFirestoreHelper.UPDATE_SINGLE_FIELD_OBJECT; -import static com.google.cloud.firestore.LocalFirestoreHelper.batchWrite; import static com.google.cloud.firestore.LocalFirestoreHelper.commit; import static com.google.cloud.firestore.LocalFirestoreHelper.commitResponse; import static com.google.cloud.firestore.LocalFirestoreHelper.create; @@ -27,19 +26,15 @@ import static com.google.cloud.firestore.LocalFirestoreHelper.set; import static com.google.cloud.firestore.LocalFirestoreHelper.update; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; import static org.mockito.Mockito.doReturn; -import com.google.api.core.ApiFutures; import com.google.api.gax.rpc.UnaryCallable; import com.google.cloud.Timestamp; import com.google.cloud.firestore.spi.v1.FirestoreRpc; import com.google.firestore.v1.BatchWriteRequest; -import com.google.firestore.v1.BatchWriteResponse; import com.google.firestore.v1.CommitRequest; import com.google.firestore.v1.CommitResponse; import com.google.firestore.v1.Write; -import io.grpc.Status; import java.util.ArrayList; import java.util.Arrays; import 
java.util.Collections; @@ -336,40 +331,4 @@ public void deleteDocument() throws Exception { CommitRequest commitRequest = commitCapture.getValue(); assertEquals(commit(writes.toArray(new Write[] {})), commitRequest); } - - @Test - public void bulkCommit() throws Exception { - BatchWriteResponse.Builder response = BatchWriteResponse.newBuilder(); - response.addWriteResultsBuilder().getUpdateTimeBuilder().setNanos(1); - response.addWriteResultsBuilder(); - response.addStatusBuilder().build(); - response.addStatusBuilder().setCode(14).build(); - doReturn(ApiFutures.immediateFuture(response.build())) - .when(firestoreMock) - .sendRequest( - batchWriteCapture.capture(), - Matchers.>any()); - - List writes = new ArrayList<>(); - batch.set(documentReference, LocalFirestoreHelper.SINGLE_FIELD_MAP); - writes.add(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO)); - - batch.create(documentReference, LocalFirestoreHelper.SINGLE_FIELD_MAP); - writes.add(create(LocalFirestoreHelper.SINGLE_FIELD_PROTO)); - - assertEquals(2, batch.getMutationsSize()); - - batch.markReadyToSend(); - List batchWriteResults = batch.bulkCommit().get(); - - assertNull(batchWriteResults.get(0).getException()); - assertEquals(Timestamp.ofTimeSecondsAndNanos(0, 1), batchWriteResults.get(0).getWriteTime()); - assertEquals( - Status.UNAVAILABLE, - ((FirestoreException) batchWriteResults.get(1).getException()).getStatus()); - assertNull(batchWriteResults.get(1).getWriteTime()); - - BatchWriteRequest batchWriteRequest = batchWriteCapture.getValue(); - assertEquals(batchWrite(writes.toArray(new Write[] {})), batchWriteRequest); - } } From d92d1dacdc9f0ae9d587dc90fe7b1997ada8db13 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 9 Nov 2020 11:45:33 -0600 Subject: [PATCH 04/22] set MAX_BATCH_SIZE to 20 now that it won't affect WriteBatch/Transaction limits --- .../google/cloud/firestore/BulkWriter.java | 209 +++++++---- .../cloud/firestore/BulkWriterError.java | 36 +- .../cloud/firestore/BulkWriterTest.java | 324 
+++++++++++++++++- 3 files changed, 477 insertions(+), 92 deletions(-) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index 753d63905a..31c2c105e1 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -19,10 +19,7 @@ import com.google.api.core.ApiAsyncFunction; import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; -import com.google.api.core.CurrentMillisClock; import com.google.api.core.SettableApiFuture; -import com.google.api.gax.retrying.ExponentialRetryAlgorithm; -import com.google.api.gax.retrying.TimedAttemptSettings; import com.google.api.gax.rpc.StatusCode.Code; import com.google.cloud.firestore.v1.FirestoreSettings; import com.google.common.annotations.VisibleForTesting; @@ -44,11 +41,24 @@ import javax.annotation.Nullable; final class BulkWriter implements AutoCloseable { - public interface WriteResultListener { + /** + * A callback set by `addWriteResultListener()` to be run every time an operation successfully + * completes. + */ + public interface WriteResultCallback { + /** + * @param documentReference The document the operation was performed on. + * @param result The result of the operation. + */ void onResult(DocumentReference documentReference, WriteResult result); }; - public interface WriteErrorListener { + /** A callback set by `addWriteErrorListener()` to be run every time an operation fails. */ + public interface WriteErrorCallback { + /** + * @param error The error object from the failed BulkWriter operation attempt. + * @return Whether to retry the operation or not. + */ boolean shouldRetryListener(BulkWriterError error); } @@ -63,7 +73,7 @@ enum OperationType { DELETE } /** The maximum number of writes that can be in a single batch. 
*/ - public static final int MAX_BATCH_SIZE = 500; + public static final int MAX_BATCH_SIZE = 20; /** * The maximum number of retries that will be attempted by backoff before stopping all retry @@ -134,13 +144,13 @@ enum OperationType { /** Rate limiter used to throttle requests as per the 500/50/5 rule. */ private final RateLimiter rateLimiter; - private WriteResultListener successListener = - new WriteResultListener() { + private WriteResultCallback successListener = + new WriteResultCallback() { public void onResult(DocumentReference documentReference, WriteResult result) {} }; - private WriteErrorListener errorListener = - new WriteErrorListener() { + private WriteErrorCallback errorListener = + new WriteErrorCallback() { public boolean shouldRetryListener(BulkWriterError error) { if (error.getFailedAttempts() == MAX_RETRY_ATTEMPTS) { return false; @@ -159,15 +169,8 @@ public boolean shouldRetryListener(BulkWriterError error) { private final ScheduledExecutorService firestoreExecutor; - private final ExponentialRetryAlgorithm backoff; - private TimedAttemptSettings nextAttempt; - BulkWriter(FirestoreImpl firestore, BulkWriterOptions options) { this.firestore = firestore; - this.backoff = - new ExponentialRetryAlgorithm( - firestore.getOptions().getRetrySettings(), CurrentMillisClock.getDefaultClock()); - this.nextAttempt = backoff.createFirstAttempt(); this.firestoreExecutor = firestore.getClient().getExecutor(); if (!options.getThrottlingEnabled()) { @@ -214,7 +217,8 @@ public boolean shouldRetryListener(BulkWriterError error) { * * @param documentReference A reference to the document to be created. * @param fields A map of the fields and values for the document. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. 
*/ @Nonnull public ApiFuture create( @@ -237,7 +241,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * * @param documentReference A reference to the document to be created. * @param pojo The POJO that will be used to populate the document contents. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture create( @@ -257,8 +262,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * Delete a document from the database. * * @param documentReference The DocumentReference to delete. - * @return An ApiFuture containing the result of the delete. Contains an error if the delete - * fails. + * @return An ApiFuture containing a sentinel value (Timestamp(0)) for the delete operation. + * Contains a {@link BulkWriterError} if the delete fails. */ @Nonnull public ApiFuture delete(@Nonnull final DocumentReference documentReference) { @@ -279,7 +284,7 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference The DocumentReference to delete. * @param precondition Precondition to enforce for this delete. * @return An ApiFuture containing a sentinel value (Timestamp(0)) for the delete operation. - * Contains an error if the delete fails. + * Contains a {@link BulkWriterError} if the delete fails. */ @Nonnull public ApiFuture delete( @@ -301,9 +306,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * exist yet, it will be created. * * @param documentReference A reference to the document to be set. - * @param fields A map of the fields and values for the document. - * @return An ApiFuture containing a sentinel value (Timestamp(0)) for the delete operation. - * Contains an error if the delete fails. + * @param fields A map of the fields and values for the document. * @return An ApiFuture + * containing the result of the write. 
Contains a {@link BulkWriterError} if the write fails. */ @Nonnull public ApiFuture set( @@ -328,7 +332,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference A reference to the document to be set. * @param fields A map of the fields and values for the document. * @param options An object to configure the set behavior. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture set( @@ -354,7 +359,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference A reference to the document to be set. * @param pojo The POJO that will be used to populate the document contents. * @param options An object to configure the set behavior. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture set( @@ -378,7 +384,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * * @param documentReference A reference to the document to be set. * @param pojo The POJO that will be used to populate the document contents. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture set( @@ -405,7 +412,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * * @param documentReference A reference to the document to be updated. * @param fields A map of the fields and values for the document. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. 
Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture update( @@ -434,7 +442,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference A reference to the document to be updated. * @param fields A map of the fields and values for the document. * @param precondition Precondition to enforce on this update. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture update( @@ -465,7 +474,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param field The first field to set. * @param value The first value to set. * @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture update( @@ -497,7 +507,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param fieldPath The first field to set. * @param value The first value to set. * @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture update( @@ -529,7 +540,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param field The first field to set. * @param value The first value to set. * @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. 
Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture update( @@ -563,7 +575,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param fieldPath The first field to set. * @param value The first value to set. * @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains an error if the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if + * the write fails. */ @Nonnull public ApiFuture update( @@ -585,14 +598,22 @@ public ApiFuture apply(BulkCommitBatch batch) { } private ApiFuture executeWrite( - DocumentReference documentReference, + final DocumentReference documentReference, OperationType operationType, BulkWriterOperationCallback operationCallback) { final SettableApiFuture operationCompletedFuture = SettableApiFuture.create(); pendingOperations.add(operationCompletedFuture); ApiFuture writeResultApiFuture = - continueCallback(documentReference, operationType, operationCallback, 0); + ApiFutures.transformAsync( + executeWriteWithRetries(documentReference, operationType, operationCallback, 0), + new ApiAsyncFunction() { + public ApiFuture apply(WriteResult result) { + successListener.onResult(documentReference, result); + return ApiFutures.immediateFuture(result); + } + }, + MoreExecutors.directExecutor()); writeResultApiFuture.addListener( new Runnable() { public void run() { @@ -605,7 +626,7 @@ public void run() { return writeResultApiFuture; } - private ApiFuture continueCallback( + private ApiFuture executeWriteWithRetries( final DocumentReference documentReference, final OperationType operationType, final BulkWriterOperationCallback operationCallback, @@ -619,40 +640,31 @@ private ApiFuture continueCallback( sendReadyBatches(operationBatchQueue); } - return ApiFutures.transformAsync( - ApiFutures.catchingAsync( - operationCallback.apply(bulkCommitBatch), - 
FirestoreException.class, - new ApiAsyncFunction() { - public ApiFuture apply(FirestoreException exception) - throws BulkWriterError { - BulkWriterError bulkWriterError = - new BulkWriterError( - exception.getStatus(), - exception.getMessage(), - documentReference, - operationType, - failedAttempts); - - boolean shouldRetry = errorListener.shouldRetryListener(bulkWriterError); - logger.log( - Level.INFO, - String.format( - "Running error callback on error code: %d, shouldRetry: %b", - exception.getStatus().getCode().value(), shouldRetry)); - if (!shouldRetry) { - throw bulkWriterError; - } else { - return continueCallback( - documentReference, operationType, operationCallback, failedAttempts + 1); - } - } - }, - MoreExecutors.directExecutor()), - new ApiAsyncFunction() { - public ApiFuture apply(WriteResult result) throws Exception { - successListener.onResult(documentReference, result); - return ApiFutures.immediateFuture(result); + return ApiFutures.catchingAsync( + operationCallback.apply(bulkCommitBatch), + FirestoreException.class, + new ApiAsyncFunction() { + public ApiFuture apply(FirestoreException exception) throws BulkWriterError { + BulkWriterError bulkWriterError = + new BulkWriterError( + exception.getStatus(), + exception.getMessage(), + documentReference, + operationType, + failedAttempts); + + boolean shouldRetry = errorListener.shouldRetryListener(bulkWriterError); + logger.log( + Level.INFO, + String.format( + "Running error callback on error code: %d, shouldRetry: %b", + exception.getStatus().getCode().value(), shouldRetry)); + if (!shouldRetry) { + throw bulkWriterError; + } else { + return executeWriteWithRetries( + documentReference, operationType, operationCallback, failedAttempts + 1); + } } }, MoreExecutors.directExecutor()); @@ -726,7 +738,8 @@ public void run() { /** * Commits all enqueued writes and marks the BulkWriter instance as closed. * - *

After calling `close()`, calling any method wil return an error. + *

After calling `close()`, calling any method will return an error. Any retries scheduled with + `addWriteErrorListener()` will be run before the `close()` completes. + *

This method completes when there are no more pending writes. Calling this method will send * all requests. @@ -743,12 +756,56 @@ private void verifyNotClosed() { } } - public void addWriteResultListener(WriteResultListener writeResultListener) { - successListener = writeResultListener; + /** + * Attaches a listener that is run every time a BulkWriter operation successfully completes. + * + *

For example, see the sample code: + * BulkWriter bulkWriter = firestore.bulkWriter(); + * bulkWriter.addWriteResultListener( + * new WriteResultCallback() { + * public void onResult(DocumentReference documentReference, WriteResult result) { + * System.out.println( + * "Successfully executed write on document: " + * + documentReference + * + " at: " + * + result.getUpdateTime()); + * } + * }); + * + * + * @param writeResultCallback A callback to be called every time a BulkWriter operation + * successfully completes. + */ + public void addWriteResultListener(WriteResultCallback writeResultCallback) { + successListener = writeResultCallback; } - public void addWriteErrorListener(WriteErrorListener shouldRetryListener) { - errorListener = shouldRetryListener; + /** + * Attaches an error handler listener that is run every time a BulkWriter operation fails. + * + *

BulkWriter has a default error handler that retries UNAVAILABLE and ABORTED errors up to a + * maximum of 10 failed attempts. When an error handler is specified, the default error handler + * will be overwritten. + * + *

For example, see the sample code: + * BulkWriter bulkWriter = firestore.bulkWriter(); + * bulkWriter.addWriteErrorListener( + * new WriteErrorCallback() { + * if (error.getStatus() == Status.UNAVAILABLE + * && error.getFailedAttempts() < MAX_RETRY_ATTEMPTS) { + * return true; + * } else { + * System.out.println("Failed write at document: " + error.getDocumentReference()); + * return false; + * } + * }); + * + * + * @param shouldRetryCallback A callback to be called every time a BulkWriter operation fails. + * Returning `true` will retry the operation. Returning `false` will stop the retry loop. + */ + public void addWriteErrorListener(WriteErrorCallback shouldRetryCallback) { + errorListener = shouldRetryCallback; } /** diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java index c9cef660ef..3a906be9a0 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java @@ -19,25 +19,31 @@ import com.google.cloud.firestore.BulkWriter.OperationType; import io.grpc.Status; -public class BulkWriterError extends Exception { - public DocumentReference getDocumentReference() { - return documentReference; - } - - public int getFailedAttempts() { - return failedAttempts; +/** The error thrown when a BulkWriter operation fails. */ +class BulkWriterError extends Exception { + /** @return The status code of the error. */ + public Status getStatus() { + return status; } + /** @return The error message of the error. */ public String getMessage() { return message; } + /** @return The document reference the operation was performed on. */ + public DocumentReference getDocumentReference() { + return documentReference; + } + + /** @return The type of operation performed. 
*/ public OperationType getOperationType() { return operationType; } - public Status getStatus() { - return status; + /** @return How many times this operation has been attempted unsuccessfully. */ + public int getFailedAttempts() { + return failedAttempts; } private final Status status; @@ -49,13 +55,13 @@ public Status getStatus() { public BulkWriterError( Status status, String message, - DocumentReference reference, - OperationType type, - int attempts) { + DocumentReference documentReference, + OperationType operationType, + int failedAttempts) { this.status = status; this.message = message; - documentReference = reference; - operationType = type; - failedAttempts = attempts; + this.documentReference = documentReference; + this.operationType = operationType; + this.failedAttempts = failedAttempts; } } diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 2d877dafaf..71a1ee629b 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -21,6 +21,7 @@ import static com.google.cloud.firestore.LocalFirestoreHelper.delete; import static com.google.cloud.firestore.LocalFirestoreHelper.set; import static com.google.cloud.firestore.LocalFirestoreHelper.update; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -32,6 +33,8 @@ import com.google.api.core.SettableApiFuture; import com.google.api.gax.rpc.UnaryCallable; import com.google.cloud.Timestamp; +import com.google.cloud.firestore.BulkWriter.WriteErrorCallback; +import com.google.cloud.firestore.BulkWriter.WriteResultCallback; import com.google.cloud.firestore.LocalFirestoreHelper.ResponseStubber; import 
com.google.cloud.firestore.spi.v1.FirestoreRpc; import com.google.firestore.v1.BatchWriteRequest; @@ -49,7 +52,9 @@ import javax.annotation.Nonnull; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; @@ -63,7 +68,7 @@ @RunWith(MockitoJUnitRunner.class) public class BulkWriterTest { - // @Rule public Timeout timeout = new Timeout(500, TimeUnit.MILLISECONDS); + @Rule public Timeout timeout = new Timeout(500, TimeUnit.MILLISECONDS); @Spy private final FirestoreRpc firestoreRpc = Mockito.mock(FirestoreRpc.class); @@ -351,6 +356,323 @@ public void sendWritesToDifferentDocsInSameBatch() throws Exception { assertEquals(Timestamp.ofTimeSecondsAndNanos(2, 0), result2.get().getUpdateTime()); } + @Test + public void runsSuccessHandler() throws Exception { + ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite( + create(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1"), + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc2"), + update(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc3"), + delete("coll/doc4")), + mergeResponses( + successResponse(1), + successResponse(2), + successResponse(3), + successResponse(4))); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + + final List writeResults = new ArrayList(); + DocumentReference doc3 = firestoreMock.document("coll/doc3"); + DocumentReference doc4 = firestoreMock.document("coll/doc4"); + bulkWriter.addWriteResultListener( + new WriteResultCallback() { + public void onResult(DocumentReference documentReference, WriteResult result) { + writeResults.add((int) result.getUpdateTime().getSeconds()); + } + }); + bulkWriter.create(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.set(doc2, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.update(doc3, 
LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.delete(doc4); + bulkWriter.close(); + assertArrayEquals(new Integer[] {1, 2, 3, 4}, writeResults.toArray()); + } + + @Test + public void retriesFailedOperationsWithGlobalErrorCallback() throws Exception { + ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite( + create(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1"), + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc2"), + update(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc3"), + delete("coll/doc4")), + mergeResponses( + successResponse(1), + failedResponse(Code.INTERNAL_VALUE), + failedResponse(Code.INTERNAL_VALUE), + failedResponse(Code.INTERNAL_VALUE))); + put( + batchWrite( + set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc2"), + update(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc3"), + delete("coll/doc4")), + mergeResponses(successResponse(2), successResponse(3), successResponse(4))); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + + final List writeResults = new ArrayList<>(); + final List operations = new ArrayList<>(); + DocumentReference doc3 = firestoreMock.document("coll/doc3"); + DocumentReference doc4 = firestoreMock.document("coll/doc4"); + bulkWriter.addWriteErrorListener( + new WriteErrorCallback() { + public boolean shouldRetryListener(BulkWriterError error) { + operations.add(error.getOperationType().name()); + return true; + } + }); + bulkWriter.addWriteResultListener( + new WriteResultCallback() { + public void onResult(DocumentReference documentReference, WriteResult result) { + operations.add("SUCCESS"); + writeResults.add((int) result.getUpdateTime().getSeconds()); + } + }); + bulkWriter.create(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.set(doc2, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.update(doc3, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.delete(doc4); + bulkWriter.close(); + assertArrayEquals(new Integer[] 
{1, 2, 3, 4}, writeResults.toArray()); + assertArrayEquals( + new String[] {"SUCCESS", "SET", "UPDATE", "DELETE", "SUCCESS", "SUCCESS", "SUCCESS"}, + operations.toArray()); + } + + @Test + public void errorSurfacedEvenWithRetryFunction() throws Exception { + ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + + final boolean[] errorListenerCalled = {false}; + bulkWriter.addWriteErrorListener( + new WriteErrorCallback() { + public boolean shouldRetryListener(BulkWriterError error) { + errorListenerCalled[0] = true; + return false; + } + }); + + ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.close(); + assertTrue(errorListenerCalled[0]); + try { + result1.get(); + fail("Operation should have failed in test"); + } catch (Exception e) { + assertEquals(Status.INTERNAL, ((BulkWriterError) e.getCause()).getStatus()); + } + } + + @Test + public void surfacesExceptionsThrownByUserProvidedErrorListener() throws Exception { + ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + + bulkWriter.addWriteErrorListener( + new WriteErrorCallback() { + public boolean shouldRetryListener(BulkWriterError error) { + throw new NullPointerException("Test code threw NullPointerException"); + } + }); + + ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.close(); + try { + result1.get(); + fail("Operation should have failed in test"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("Test code threw NullPointerException")); + } + } + + @Test + public void 
writeFailsIfUserProvidedSuccessListenerFails() throws Exception { + ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + successResponse(1)); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + + bulkWriter.addWriteResultListener( + new WriteResultCallback() { + public void onResult(DocumentReference documentReference, WriteResult result) { + throw new NullPointerException("Test code threw NullPointerException"); + } + }); + + ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.close(); + try { + result1.get(); + fail("Operation should have failed in test"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("Test code threw NullPointerException")); + } + } + + @Test + public void retriesMultipleTimes() throws Exception { + ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + successResponse(1)); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + + bulkWriter.addWriteErrorListener( + new WriteErrorCallback() { + public boolean shouldRetryListener(BulkWriterError error) { + return true; + } + }); + + ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.close(); + assertEquals(Timestamp.ofTimeSecondsAndNanos(1, 0), result1.get().getUpdateTime()); + } + + @Test + public void retriesMaintainCorrectWriteResolutionOrdering() throws Exception { + ResponseStubber responseStubber = + new 
ResponseStubber() { + { + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + successResponse(1)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc2")), + successResponse(2)); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + + // Use separate futures to track listener completion since the callbacks are run on a different + // thread than the BulkWriter operations. + final SettableApiFuture flushComplete = SettableApiFuture.create(); + + final List operations = new ArrayList<>(); + bulkWriter.addWriteErrorListener( + new WriteErrorCallback() { + public boolean shouldRetryListener(BulkWriterError error) { + return true; + } + }); + + bulkWriter + .set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP) + .addListener( + new Runnable() { + public void run() { + operations.add("BEFORE_FLUSH"); + } + }, + executor); + bulkWriter.flush().addListener( + new Runnable() { + public void run() { + operations.add("FLUSH"); + flushComplete.set(null); + } + }, + executor); + bulkWriter + .set(doc2, LocalFirestoreHelper.SINGLE_FIELD_MAP) + .addListener( + new Runnable() { + public void run() { + operations.add("AFTER_FLUSH"); + } + }, + executor); + flushComplete.get(); + + // Verify that the 2nd operation did not complete as a result of the flush() call. 
+ assertArrayEquals(new String[] {"BEFORE_FLUSH", "FLUSH"}, operations.toArray()); + bulkWriter.close(); + } + + @Test + public void returnsTheErrorIfNoRetrySpecified() throws Exception { + ResponseStubber responseStubber = + new ResponseStubber() { + { + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + put( + batchWrite(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, "coll/doc1")), + failedResponse(Code.INTERNAL_VALUE)); + } + }; + responseStubber.initializeStub(batchWriteCapture, firestoreMock); + + bulkWriter.addWriteErrorListener( + new WriteErrorCallback() { + public boolean shouldRetryListener(BulkWriterError error) { + return error.getFailedAttempts() < 3; + } + }); + + ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + bulkWriter.close(); + try { + result1.get(); + fail("Operation should have failed"); + } catch (Exception e) { + assertEquals(Status.INTERNAL, ((BulkWriterError) e.getCause()).getStatus()); + } + } + @Test public void sendBatchesWhenSizeLimitIsReached() throws Exception { ResponseStubber responseStubber = From 3d895e1d14624962866712cac919bf0dc30b85f8 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 9 Nov 2020 14:36:33 -0600 Subject: [PATCH 05/22] cleanup --- .../cloud/firestore/BulkCommitBatch.java | 31 +++++++++++++------ .../google/cloud/firestore/BulkWriter.java | 16 +++++----- .../google/cloud/firestore/UpdateBuilder.java | 24 ++++++-------- .../cloud/firestore/BulkWriterTest.java | 18 ++++++----- 4 files changed, 49 insertions(+), 40 deletions(-) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java 
b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java index 15014f9c9e..50d2933d00 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java @@ -32,6 +32,8 @@ import io.opencensus.trace.Tracing; import java.util.ArrayList; import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; import javax.annotation.Nullable; /** Used to represent a batch on the BatchQueue. */ @@ -48,10 +50,15 @@ enum BatchState { SENT, } - protected BatchState state = BatchState.OPEN; + private BatchState state = BatchState.OPEN; + + private final List> pendingOperations = new ArrayList<>(); + private final Set documents = new CopyOnWriteArraySet<>(); + private final int maxBatchSize; BulkCommitBatch(FirestoreImpl firestore, int maxBatchSize) { - super(firestore, maxBatchSize); + super(firestore); + this.maxBatchSize = maxBatchSize; } ApiFuture wrapResult(DocumentReference documentReference) { @@ -68,7 +75,7 @@ ApiFuture> bulkCommit() { .getCurrentSpan() .addAnnotation( TraceUtil.SPAN_NAME_BATCHWRITE, - ImmutableMap.of("numDocuments", AttributeValue.longAttributeValue(writes.size()))); + ImmutableMap.of("numDocuments", AttributeValue.longAttributeValue(getWrites().size()))); Preconditions.checkState( isReadyToSend(), "The batch should be marked as READY_TO_SEND before committing"); @@ -77,13 +84,15 @@ ApiFuture> bulkCommit() { final BatchWriteRequest.Builder request = BatchWriteRequest.newBuilder(); request.setDatabase(firestore.getDatabaseName()); - for (WriteOperation writeOperation : writes) { + for (WriteOperation writeOperation : getWrites()) { request.addWrites(writeOperation.write); } ApiFuture response = firestore.sendRequest(request.build(), firestore.getClient().batchWriteCallable()); + committed = true; + return ApiFutures.transform( response, new ApiFunction>() { @@ -117,17 +126,17 
@@ public List apply(BatchWriteResponse batchWriteResponse) { } int getPendingOperationCount() { - return newPendingOperations.size(); + return pendingOperations.size(); } ApiFuture processLastOperation(DocumentReference documentReference) { Preconditions.checkState( - !documentPaths.contains(documentReference), + !documents.contains(documentReference), "Batch should not contain writes to the same document"); - documentPaths.add(documentReference); + documents.add(documentReference); Preconditions.checkState(state == BatchState.OPEN, "Batch should be OPEN when adding writes"); SettableApiFuture resultFuture = SettableApiFuture.create(); - newPendingOperations.add(resultFuture); + pendingOperations.add(resultFuture); if (getPendingOperationCount() == maxBatchSize) { state = BatchState.READY_TO_SEND; @@ -153,7 +162,7 @@ public ApiFuture apply(BatchWriteResult batchWriteResult) throws Ex */ void newProcessResults(List results) { for (int i = 0; i < results.size(); i++) { - SettableApiFuture resultFuture = newPendingOperations.get(i); + SettableApiFuture resultFuture = pendingOperations.get(i); BatchWriteResult result = results.get(i); if (result.getException() == null) { resultFuture.set(result); @@ -176,4 +185,8 @@ boolean isOpen() { boolean isReadyToSend() { return state == BatchState.READY_TO_SEND; } + + boolean has(DocumentReference documentReference) { + return documents.contains(documentReference); + } } diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index 31c2c105e1..04b48edf36 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -133,7 +133,7 @@ enum OperationType { private final Set> pendingOperations = new CopyOnWriteArraySet<>(); /** - * A list of future that represent sent batches. 
Each future is completed when the batch's + * A list of futures that represent sent batches. Each future is completed when the batch's * response is received. This includes batches from both the batchQueue and retryBatchQueue. */ private final Set> pendingBatches = new CopyOnWriteArraySet<>(); @@ -293,7 +293,7 @@ public ApiFuture delete( verifyNotClosed(); return executeWrite( documentReference, - OperationType.SET, + OperationType.DELETE, new BulkWriterOperationCallback() { public ApiFuture apply(BulkCommitBatch batch) { return batch.delete(documentReference, precondition); @@ -606,7 +606,7 @@ private ApiFuture executeWrite( ApiFuture writeResultApiFuture = ApiFutures.transformAsync( - executeWriteWithRetries(documentReference, operationType, operationCallback, 0), + executeWriteHelper(documentReference, operationType, operationCallback, 0), new ApiAsyncFunction() { public ApiFuture apply(WriteResult result) { successListener.onResult(documentReference, result); @@ -626,7 +626,7 @@ public void run() { return writeResultApiFuture; } - private ApiFuture executeWriteWithRetries( + private ApiFuture executeWriteHelper( final DocumentReference documentReference, final OperationType operationType, final BulkWriterOperationCallback operationCallback, @@ -662,7 +662,7 @@ public ApiFuture apply(FirestoreException exception) throws BulkWri if (!shouldRetry) { throw bulkWriterError; } else { - return executeWriteWithRetries( + return executeWriteHelper( documentReference, operationType, operationCallback, failedAttempts + 1); } } @@ -816,7 +816,7 @@ private BulkCommitBatch getEligibleBatch( DocumentReference documentReference, List batchQueue) { if (batchQueue.size() > 0) { BulkCommitBatch lastBatch = batchQueue.get(batchQueue.size() - 1); - if (lastBatch.isOpen() && !lastBatch.documentPaths.contains(documentReference)) { + if (lastBatch.isOpen() && !lastBatch.has(documentReference)) { return lastBatch; } } @@ -849,7 +849,7 @@ private void sendReadyBatches(final List 
batchQueue) { while (index < batchQueue.size() && batchQueue.get(index).isReadyToSend()) { final BulkCommitBatch batch = batchQueue.get(index); - // Future that completes when the current or its scheduling attempts completes. + // Future that completes when the current batch or its scheduling attempts completes. final SettableApiFuture batchCompletedFuture = SettableApiFuture.create(); pendingBatches.add(batchCompletedFuture); @@ -921,7 +921,7 @@ private ApiFuture bulkCommit(final BulkCommitBatch batch) { public ApiFuture> apply(Exception exception) { List results = new ArrayList<>(); // If the BatchWrite RPC fails, map the exception to each individual result. - for (int i = 0; i < batch.newPendingOperations.size(); ++i) { + for (int i = 0; i < batch.getPendingOperationCount(); ++i) { results.add(new BatchWriteResult(null, exception)); } return ApiFutures.immediateFuture(results); diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java index 39c7070314..698df7be03 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java @@ -20,8 +20,8 @@ import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; import com.google.api.core.InternalExtensionOnly; -import com.google.api.core.SettableApiFuture; import com.google.cloud.firestore.UserDataConverter.EncodingOptions; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.MoreExecutors; @@ -36,10 +36,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.concurrent.CopyOnWriteArraySet; import 
javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -60,22 +58,13 @@ static class WriteOperation { } final FirestoreImpl firestore; - protected final List writes; - protected final int maxBatchSize; - private boolean committed; - protected List> newPendingOperations = new ArrayList<>(); + private final List writes = new ArrayList<>(); - Set documentPaths = new CopyOnWriteArraySet<>(); + protected boolean committed; UpdateBuilder(FirestoreImpl firestore) { - this(firestore, BulkWriter.MAX_BATCH_SIZE); - } - - UpdateBuilder(FirestoreImpl firestore, int maxBatchSize) { this.firestore = firestore; - this.maxBatchSize = maxBatchSize; - this.writes = new ArrayList<>(); } /** @@ -649,8 +638,13 @@ boolean isEmpty() { return writes.isEmpty(); } + List getWrites() { + return writes; + } + /** Get the number of writes. */ - public int getMutationsSize() { + @VisibleForTesting + int getMutationsSize() { return writes.size(); } } diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 71a1ee629b..7b5c7231b5 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -611,14 +611,16 @@ public void run() { } }, executor); - bulkWriter.flush().addListener( - new Runnable() { - public void run() { - operations.add("FLUSH"); - flushComplete.set(null); - } - }, - executor); + bulkWriter + .flush() + .addListener( + new Runnable() { + public void run() { + operations.add("FLUSH"); + flushComplete.set(null); + } + }, + executor); bulkWriter .set(doc2, LocalFirestoreHelper.SINGLE_FIELD_MAP) .addListener( From 906165a5622fb83a9ba0a434c27fdbc9cfa4cb3b Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 9 Nov 2020 15:16:43 -0600 Subject: [PATCH 06/22] add clirr exceptions --- .../clirr-ignored-differences.xml | 24 
+++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/google-cloud-firestore/clirr-ignored-differences.xml b/google-cloud-firestore/clirr-ignored-differences.xml index 42302e9735..21b38c5fbf 100644 --- a/google-cloud-firestore/clirr-ignored-differences.xml +++ b/google-cloud-firestore/clirr-ignored-differences.xml @@ -211,6 +211,30 @@ java.util.List + + + 6001 + com/google/cloud/firestore/UpdateBuilder + pendingOperations + + + 6001 + com/google/cloud/firestore/UpdateBuilder + state + + + 6010 + com/google/cloud/firestore/UpdateBuilder + writes + + + 7009 + com/google/cloud/firestore/UpdateBuilder + int getMutationsSize() + + + + + + + true + + + + + %date %-5.5level [%-32.32thread] %-30.30logger{30} - {%2.2marker} %message%n + + + + + %date %-5.5level [%-32.32thread] %-30.30logger{30} - {%2.2marker} %message%nopex%n + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 49fe27577d6741547adc55f23a14ab65d9c479fb Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 10 Dec 2020 10:05:03 -0800 Subject: [PATCH 13/22] Move all BulkWriter logic to its own thread + cleanup --- google-cloud-firestore/pom.xml | 22 --- .../google/cloud/firestore/BulkWriter.java | 133 ++++++++++++++---- .../firestore/BulkWriterIntegrationTest.java | 16 +-- .../cloud/firestore/BulkWriterTest.java | 15 +- .../src/test/resources/logback-test.xml | 93 ------------ 5 files changed, 110 insertions(+), 169 deletions(-) delete mode 100644 google-cloud-firestore/src/test/resources/logback-test.xml diff --git a/google-cloud-firestore/pom.xml b/google-cloud-firestore/pom.xml index 660f103452..7d41275f75 100644 --- a/google-cloud-firestore/pom.xml +++ b/google-cloud-firestore/pom.xml @@ -161,28 +161,6 @@ 3.11 test - - - org.slf4j - slf4j-api - 1.7.30 - - - org.slf4j - jul-to-slf4j - 1.7.30 - - - org.slf4j - jcl-over-slf4j - 1.7.30 - - - ch.qos.logback - logback-classic - 1.2.3 - runtime - diff --git 
a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index 7e6a4fbe18..ac02275fbd 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -36,7 +36,6 @@ import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -44,10 +43,8 @@ import java.util.logging.Logger; import javax.annotation.Nonnull; import javax.annotation.Nullable; -import org.slf4j.LoggerFactory; final class BulkWriter implements AutoCloseable { - private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(BulkWriter.class); /** * A callback set by `addWriteResultListener()` to be run every time an operation successfully * completes. @@ -135,7 +132,8 @@ enum OperationType { /** * A set of futures that represent pending BulkWriter operations. Each future is completed when * the BulkWriter operation resolves. This set includes retries. Each retry's promise is added, - * attempted, and removed from this set before scheduling the next retry. + * attempted, and removed from this set before scheduling the next retry. This set of futures is + * used by flush() to track when enqueued writes are completed. */ private final Set> pendingOperations = new CopyOnWriteArraySet<>(); @@ -177,17 +175,22 @@ public boolean onError(BulkWriterError error) { private final FirestoreImpl firestore; private final ScheduledExecutorService firestoreExecutor; - private final ExecutorService userCallbackExecutor; + + // Executor used to run all BulkWriter operations. 
BulkWriter uses its own executor since we + // don't want to block a gax/grpc executor while running user error and success callbacks. + private final Executor bulkWriterExecutor; BulkWriter(FirestoreImpl firestore, BulkWriterOptions options) { this.firestore = firestore; this.firestoreExecutor = firestore.getClient().getExecutor(); - // TODO(chenbrian): Refactor this to be at the Firestore level. - this.userCallbackExecutor = Executors - .newCachedThreadPool(new DefaultThreadFactory("bulk-writer-user")); - this.successExecutor = Context.currentContextExecutor(this.userCallbackExecutor); - this.errorExecutor = Context.currentContextExecutor(this.userCallbackExecutor); + this.bulkWriterExecutor = + Executors.newSingleThreadExecutor(new DefaultThreadFactory("bulk-writer")); + + Executor userCallbackExecutor = + Executors.newSingleThreadExecutor(new DefaultThreadFactory("bw-user-callback")); + this.successExecutor = Context.currentContextExecutor(userCallbackExecutor); + this.errorExecutor = Context.currentContextExecutor(userCallbackExecutor); if (!options.getThrottlingEnabled()) { this.rateLimiter = @@ -613,15 +616,19 @@ public ApiFuture apply(BulkCommitBatch batch) { }); } + /** + * Schedules the provided write operation and runs the user success callback when the write result + * is obtained. 
+ */ private ApiFuture executeWrite( final DocumentReference documentReference, - OperationType operationType, - BulkWriterOperationCallback operationCallback) { + final OperationType operationType, + final BulkWriterOperationCallback operationCallback) { final SettableApiFuture operationCompletedFuture = SettableApiFuture.create(); ApiFuture writeResultApiFuture = ApiFutures.transformAsync( - executeWriteHelper(documentReference, operationType, operationCallback, 0), + enqueueWrite(documentReference, operationType, operationCallback), new ApiAsyncFunction() { public ApiFuture apply(final WriteResult result) throws ExecutionException, InterruptedException { @@ -630,7 +637,8 @@ public ApiFuture apply(final WriteResult result) return ApiFutures.immediateFuture(result); } }, - userCallbackExecutor); + MoreExecutors.directExecutor()); + writeResultApiFuture.addListener( new Runnable() { public void run() { @@ -638,12 +646,32 @@ public void run() { operationCompletedFuture.set(null); } }, - userCallbackExecutor); + MoreExecutors.directExecutor()); + pendingOperations.add(operationCompletedFuture); return writeResultApiFuture; } - private ApiFuture executeWriteHelper( + /** Used to schedule the write enqueue logic onto the bulkWriterExecutor. */ + private ApiFuture enqueueWrite( + final DocumentReference documentReference, + final OperationType operationType, + final BulkWriterOperationCallback operationCallback) { + return ApiFutures.transformAsync( + ApiFutures.immediateFuture(null), + new ApiAsyncFunction() { + public ApiFuture apply(Object o) throws Exception { + return enqueueWriteHelper(documentReference, operationType, operationCallback, 0); + } + }, + bulkWriterExecutor); + } + + /** + * Adds the write to the appropriate batch queue and performs the bulkCommit. This helper method + * also catches failures and applies the user error callback and retrying if necessary. 
+ */ + private ApiFuture enqueueWriteHelper( final DocumentReference documentReference, final OperationType operationType, final BulkWriterOperationCallback operationCallback, @@ -680,12 +708,12 @@ public ApiFuture apply(FirestoreException exception) if (!shouldRetry) { throw bulkWriterError; } else { - return executeWriteHelper( + return enqueueWriteHelper( documentReference, operationType, operationCallback, failedAttempts + 1); } } }, - userCallbackExecutor); + MoreExecutors.directExecutor()); } /** Invokes the user error callback on the user callback executor and returns the result. */ @@ -741,17 +769,20 @@ public void run() { */ @Nonnull public ApiFuture flush() { - LOGGER.debug(">>> flush()"); - try { - verifyNotClosed(); - return performFlush(Lists.newArrayList(pendingOperations)); - } finally { - LOGGER.debug("<<< flush()"); - } + verifyNotClosed(); + final ArrayList> pendingOperationsAtFlush = + Lists.newArrayList(pendingOperations); + return ApiFutures.transformAsync( + ApiFutures.immediateFuture(null), + new ApiAsyncFunction() { + public ApiFuture apply(Object o) throws Exception { + return performFlush(pendingOperationsAtFlush); + } + }, + bulkWriterExecutor); } private ApiFuture performFlush(final List> pendingOperations) { - LOGGER.debug(">>> performFlush(pendingOperations : {})", pendingOperations); for (BulkCommitBatch batch : batchQueue) { batch.markReadyToSend(); } @@ -791,7 +822,6 @@ public void run() { } }, MoreExecutors.directExecutor()); - LOGGER.debug("<<< performFlush(pendingOperations : {})", pendingOperations); return flushComplete; } @@ -805,11 +835,9 @@ public void run() { * all requests. 
*/ public void close() throws InterruptedException, ExecutionException { - LOGGER.debug(">>> close()"); ApiFuture flushFuture = flush(); closed = true; flushFuture.get(); - LOGGER.debug("<<< close()"); } private void verifyNotClosed() { @@ -841,6 +869,26 @@ public void addWriteResultListener(WriteResultCallback writeResultCallback) { successListener = writeResultCallback; } + /** + * Attaches a listener that is run every time a BulkWriter operation successfully completes. + * + *

For example, see the sample code: + * BulkWriter bulkWriter = firestore.bulkWriter(); + * bulkWriter.addWriteResultListener( + * (DocumentReference documentReference, WriteResult result) -> { + * System.out.println( + * "Successfully executed write on document: " + * + documentReference + * + " at: " + * + result.getUpdateTime()); + * } + * ); + * + * + * @param executor The executor to run the provided callback on. + * @param writeResultCallback A callback to be called every time a BulkWriter operation + * successfully completes. + */ public void addWriteResultListener( @Nonnull Executor executor, WriteResultCallback writeResultCallback) { successListener = writeResultCallback; @@ -876,6 +924,32 @@ public void addWriteErrorListener(WriteErrorCallback onError) { errorListener = onError; } + /** + * Attaches an error handler listener that is run every time a BulkWriter operation fails. + * + *

BulkWriter has a default error handler that retries UNAVAILABLE and ABORTED errors up to a + * maximum of 10 failed attempts. When an error handler is specified, the default error handler + * will be overwritten. + * + *

For example, see the sample code: + * BulkWriter bulkWriter = firestore.bulkWriter(); + * bulkWriter.addWriteErrorListener( + * (BulkWriterError error) -> { + * if (error.getStatus() == Status.UNAVAILABLE + * && error.getFailedAttempts() < MAX_RETRY_ATTEMPTS) { + * return true; + * } else { + * System.out.println("Failed write at document: " + error.getDocumentReference()); + * return false; + * } + * } + * ); + * + * + * @param executor The executor to run the provided callback on. + * @param onError A callback to be called every time a BulkWriter operation fails. Returning + * `true` will retry the operation. Returning `false` will stop the retry loop. + */ public void addWriteErrorListener(@Nonnull Executor executor, WriteErrorCallback onError) { errorListener = onError; errorExecutor = executor; @@ -905,7 +979,6 @@ private BulkCommitBatch createNewBatch(List batchQueue) { if (batchQueue.size() > 0) { batchQueue.get(batchQueue.size() - 1).markReadyToSend(); - sendReadyBatches(batchQueue); } batchQueue.add(newBatch); return newBatch; diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterIntegrationTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterIntegrationTest.java index 7bbab5da77..256da12c25 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterIntegrationTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterIntegrationTest.java @@ -37,16 +37,8 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.bridge.SLF4JBridgeHandler; public class BulkWriterIntegrationTest { - static { - SLF4JBridgeHandler.removeHandlersForRootLogger(); - SLF4JBridgeHandler.install(); - } - private static final Logger LOGGER = LoggerFactory.getLogger(BulkWriterIntegrationTest.class); private Firestore firestore; private CollectionReference 
randomColl; private DocumentReference randomDoc; @@ -166,7 +158,6 @@ public void bulkWriterDelete() throws Exception { @Test public void bulkWriterOnResult() throws Exception { - LOGGER.debug(">>> bulkWriterOnResult()"); class NamedThreadFactory implements ThreadFactory { public Thread newThread(Runnable r) { return new Thread(r, "bulkWriterSuccess"); @@ -180,16 +171,11 @@ public Thread newThread(Runnable r) { executor, new WriteResultCallback() { public void onResult(DocumentReference documentReference, WriteResult result) { - LOGGER.debug(">>> onResult(documentReference : {}, result : {})", documentReference, - result); operations.add("operation"); - LOGGER.debug("<<< onResult(documentReference : {}, result : {})", documentReference, - result); } }); writer.set(randomDoc, Collections.singletonMap("foo", "bar")); - writer.close(); + writer.flush().get(); assertEquals("operation", operations.get(0)); - LOGGER.debug("<<< bulkWriterOnResult()"); } } diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index ff680410b1..3b61045af4 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -52,7 +52,9 @@ import javax.annotation.Nonnull; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; @@ -66,7 +68,7 @@ @RunWith(MockitoJUnitRunner.class) public class BulkWriterTest { - // @Rule public Timeout timeout = new Timeout(500, TimeUnit.MILLISECONDS); + @Rule public Timeout timeout = new Timeout(500, TimeUnit.MILLISECONDS); @Spy private final FirestoreRpc firestoreRpc = Mockito.mock(FirestoreRpc.class); @@ -160,7 +162,7 @@ public void 
hasUpdateMethod() throws Exception { responseStubber.initializeStub(batchWriteCapture, firestoreMock); ApiFuture result = bulkWriter.update(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); - bulkWriter.close(); + bulkWriter.flush().get(); responseStubber.verifyAllRequestsSent(); assertEquals(Timestamp.ofTimeSecondsAndNanos(2, 0), result.get().getUpdateTime()); @@ -196,7 +198,7 @@ public void hasCreateMethod() throws Exception { responseStubber.initializeStub(batchWriteCapture, firestoreMock); ApiFuture result = bulkWriter.create(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); - bulkWriter.close(); + bulkWriter.flush().get(); responseStubber.verifyAllRequestsSent(); assertEquals(Timestamp.ofTimeSecondsAndNanos(2, 0), result.get().getUpdateTime()); @@ -257,11 +259,6 @@ public void addsWritesToNewBatchAfterFlush() throws Exception { assertEquals(Timestamp.ofTimeSecondsAndNanos(2, 0), result2.get().getUpdateTime()); } - @Test - public void closeResolvesImmediatelyIfNoWrites() throws Exception { - bulkWriter.close(); - } - @Test public void cannotCallMethodsAfterClose() throws Exception { String expected = "BulkWriter has already been closed."; @@ -995,7 +992,7 @@ public ApiFuture answer(InvocationOnMock mock) { batchWriteCapture.capture(), Matchers.>any()); ApiFuture result = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); - bulkWriter.close(); + bulkWriter.flush().get(); try { result.get(); diff --git a/google-cloud-firestore/src/test/resources/logback-test.xml b/google-cloud-firestore/src/test/resources/logback-test.xml deleted file mode 100644 index a11e1fe6e7..0000000000 --- a/google-cloud-firestore/src/test/resources/logback-test.xml +++ /dev/null @@ -1,93 +0,0 @@ - - - - - - true - - - - - %date %-5.5level [%-32.32thread] %-30.30logger{30} - {%2.2marker} %message%n - - - - - %date %-5.5level [%-32.32thread] %-30.30logger{30} - {%2.2marker} %message%nopex%n - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - From 028e3736cf5b554da42cef079ed1fc65b3ce1565 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 10 Dec 2020 11:47:56 -0800 Subject: [PATCH 14/22] remove usage of defaultThreadFactory --- .../main/java/com/google/cloud/firestore/BulkWriter.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index ac02275fbd..8693e76b83 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -27,7 +27,6 @@ import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; import io.grpc.Context; -import io.grpc.netty.shaded.io.netty.util.concurrent.DefaultThreadFactory; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -184,11 +183,9 @@ public boolean onError(BulkWriterError error) { this.firestore = firestore; this.firestoreExecutor = firestore.getClient().getExecutor(); - this.bulkWriterExecutor = - Executors.newSingleThreadExecutor(new DefaultThreadFactory("bulk-writer")); + this.bulkWriterExecutor = Executors.newSingleThreadExecutor(); - Executor userCallbackExecutor = - Executors.newSingleThreadExecutor(new DefaultThreadFactory("bw-user-callback")); + Executor userCallbackExecutor = Executors.newSingleThreadExecutor(); this.successExecutor = Context.currentContextExecutor(userCallbackExecutor); this.errorExecutor = Context.currentContextExecutor(userCallbackExecutor); From 95fdcde1e27a763a3ccaa41c55ae7f928c2415db Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 11 Dec 2020 15:34:44 -0800 Subject: [PATCH 15/22] resolve comments rd.1 --- .../cloud/firestore/BulkCommitBatch.java | 7 +- .../google/cloud/firestore/BulkWriter.java | 157 ++++++++++-------- ...terError.java => BulkWriterException.java} | 7 +- 
.../cloud/firestore/FirestoreException.java | 4 +- .../google/cloud/firestore/FirestoreImpl.java | 6 + .../google/cloud/firestore/UpdateBuilder.java | 10 +- .../cloud/firestore/BulkWriterTest.java | 41 ++--- ...grationTest.java => ITBulkWriterTest.java} | 39 ++++- .../cloud/firestore/WriteBatchTest.java | 20 +-- 9 files changed, 180 insertions(+), 111 deletions(-) rename google-cloud-firestore/src/main/java/com/google/cloud/firestore/{BulkWriterError.java => BulkWriterException.java} (90%) rename google-cloud-firestore/src/test/java/com/google/cloud/firestore/{BulkWriterIntegrationTest.java => ITBulkWriterTest.java} (80%) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java index 00a9ccce5c..af2180cee4 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkCommitBatch.java @@ -61,6 +61,11 @@ enum BatchState { this.maxBatchSize = maxBatchSize; } + @Override + boolean isCommitted() { + return state == BatchState.SENT; + } + ApiFuture wrapResult(DocumentReference documentReference) { return processLastOperation(documentReference); } @@ -91,8 +96,6 @@ ApiFuture> bulkCommit() { ApiFuture response = firestore.sendRequest(request.build(), firestore.getClient().batchWriteCallable()); - committed = true; - return ApiFutures.transform( response, new ApiFunction>() { diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index 8693e76b83..d9073f9d5d 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -28,11 +28,10 @@ import com.google.common.util.concurrent.MoreExecutors; import 
io.grpc.Context; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.Executors; @@ -62,7 +61,7 @@ public interface WriteErrorCallback { * @param error The error object from the failed BulkWriter operation attempt. * @return Whether to retry the operation or not. */ - boolean onError(BulkWriterError error); + boolean onError(BulkWriterException error); } private interface BulkWriterOperationCallback { @@ -79,7 +78,7 @@ enum OperationType { public static final int MAX_BATCH_SIZE = 20; /** - * The maximum number of retries that will be attempted by backoff before stopping all retry + * The maximum number of retries that will be attempted with backoff before stopping all retry * attempts. */ public static final int MAX_RETRY_ATTEMPTS = 10; @@ -119,28 +118,28 @@ enum OperationType { * modification errors (as this list is modified from both the user thread and the network * thread). */ - private final List batchQueue = new CopyOnWriteArrayList<>(); + private final List batchQueue = new ArrayList<>(); /** * A queue of batches to be retried. Use a synchronized list to avoid multi-thread concurrent * modification errors (as this list is modified from both the user thread and the network * thread). */ - private final List retryBatchQueue = new CopyOnWriteArrayList<>(); + private final List retryBatchQueue = new ArrayList<>(); /** * A set of futures that represent pending BulkWriter operations. Each future is completed when - * the BulkWriter operation resolves. This set includes retries. Each retry's promise is added, - * attempted, and removed from this set before scheduling the next retry. This set of futures is - * used by flush() to track when enqueued writes are completed. 
+ * the BulkWriter operation resolves with either a WriteResult or failure after all applicable + * retries are performed. This set of futures is used by flush() to track when enqueued writes are + * completed. */ - private final Set> pendingOperations = new CopyOnWriteArraySet<>(); + private final Set> pendingOperations = new HashSet<>(); /** * A list of futures that represent sent batches. Each future is completed when the batch's * response is received. This includes batches from both the batchQueue and retryBatchQueue. */ - private final Set> pendingBatches = new CopyOnWriteArraySet<>(); + private final Set> pendingBatches = new HashSet<>(); /** Whether this BulkWriter instance is closed. Once closed, it cannot be opened again. */ private boolean closed = false; @@ -155,7 +154,7 @@ public void onResult(DocumentReference documentReference, WriteResult result) {} private WriteErrorCallback errorListener = new WriteErrorCallback() { - public boolean onError(BulkWriterError error) { + public boolean onError(BulkWriterException error) { if (error.getFailedAttempts() == MAX_RETRY_ATTEMPTS) { return false; } @@ -173,17 +172,21 @@ public boolean onError(BulkWriterError error) { private Executor errorExecutor; private final FirestoreImpl firestore; - private final ScheduledExecutorService firestoreExecutor; // Executor used to run all BulkWriter operations. BulkWriter uses its own executor since we // don't want to block a gax/grpc executor while running user error and success callbacks. 
- private final Executor bulkWriterExecutor; + private final ScheduledExecutorService bulkWriterExecutor; BulkWriter(FirestoreImpl firestore, BulkWriterOptions options) { - this.firestore = firestore; - this.firestoreExecutor = firestore.getClient().getExecutor(); + this(firestore, options, Executors.newSingleThreadScheduledExecutor()); + } - this.bulkWriterExecutor = Executors.newSingleThreadExecutor(); + BulkWriter( + FirestoreImpl firestore, + BulkWriterOptions options, + ScheduledExecutorService bulkWriterExecutor) { + this.firestore = firestore; + this.bulkWriterExecutor = bulkWriterExecutor; Executor userCallbackExecutor = Executors.newSingleThreadExecutor(); this.successExecutor = Context.currentContextExecutor(userCallbackExecutor); @@ -233,8 +236,8 @@ public boolean onError(BulkWriterError error) { * * @param documentReference A reference to the document to be created. * @param fields A map of the fields and values for the document. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture create( @@ -257,8 +260,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * * @param documentReference A reference to the document to be created. * @param pojo The POJO that will be used to populate the document contents. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture create( @@ -279,7 +282,7 @@ public ApiFuture apply(BulkCommitBatch batch) { * * @param documentReference The DocumentReference to delete. * @return An ApiFuture containing a sentinel value (Timestamp(0)) for the delete operation. 
- * Contains a {@link BulkWriterError} if the delete fails. + * Contains a {@link BulkWriterException} if the delete fails. */ @Nonnull public ApiFuture delete(@Nonnull final DocumentReference documentReference) { @@ -300,7 +303,7 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference The DocumentReference to delete. * @param precondition Precondition to enforce for this delete. * @return An ApiFuture containing a sentinel value (Timestamp(0)) for the delete operation. - * Contains a {@link BulkWriterError} if the delete fails. + * Contains a {@link BulkWriterException} if the delete fails. */ @Nonnull public ApiFuture delete( @@ -322,8 +325,9 @@ public ApiFuture apply(BulkCommitBatch batch) { * exist yet, it will be created. * * @param documentReference A reference to the document to be set. - * @param fields A map of the fields and values for the document. * @return An ApiFuture - * containing the result of the write. Contains a {@link BulkWriterError} if the write fails. + * @param fields A map of the fields and values for the document. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture set( @@ -348,8 +352,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference A reference to the document to be set. * @param fields A map of the fields and values for the document. * @param options An object to configure the set behavior. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture set( @@ -375,13 +379,13 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference A reference to the document to be set. * @param pojo The POJO that will be used to populate the document contents. 
* @param options An object to configure the set behavior. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture set( @Nonnull final DocumentReference documentReference, - final Object pojo, + @Nonnull final Object pojo, @Nonnull final SetOptions options) { verifyNotClosed(); return executeWrite( @@ -400,12 +404,12 @@ public ApiFuture apply(BulkCommitBatch batch) { * * @param documentReference A reference to the document to be set. * @param pojo The POJO that will be used to populate the document contents. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture set( - @Nonnull final DocumentReference documentReference, final Object pojo) { + @Nonnull final DocumentReference documentReference, @Nonnull final Object pojo) { verifyNotClosed(); return executeWrite( documentReference, @@ -428,8 +432,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * * @param documentReference A reference to the document to be updated. * @param fields A map of the fields and values for the document. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture update( @@ -458,8 +462,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param documentReference A reference to the document to be updated. * @param fields A map of the fields and values for the document. * @param precondition Precondition to enforce on this update. 
- * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture update( @@ -490,8 +494,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param field The first field to set. * @param value The first value to set. * @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture update( @@ -523,8 +527,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param fieldPath The first field to set. * @param value The first value to set. * @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture update( @@ -556,8 +560,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param field The first field to set. * @param value The first value to set. * @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture update( @@ -591,8 +595,8 @@ public ApiFuture apply(BulkCommitBatch batch) { * @param fieldPath The first field to set. * @param value The first value to set. 
* @param moreFieldsAndValues String and Object pairs with more fields to be set. - * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterError} if - * the write fails. + * @return An ApiFuture containing the result of the write. Contains a {@link BulkWriterException} + * if the write fails. */ @Nonnull public ApiFuture update( @@ -634,6 +638,11 @@ public ApiFuture apply(final WriteResult result) return ApiFutures.immediateFuture(result); } }, + // This should always run on the `bulkWriterExecutor`. However, we use + // `directExecutor()` + // here to ensure that the transform code runs immediately after without yielding. Using + // `bulkWriterExecutor` instead means that the transform is scheduled after other work + // already on the executor, which causes BulkWriter to deadlock. MoreExecutors.directExecutor()); writeResultApiFuture.addListener( @@ -643,6 +652,10 @@ public void run() { operationCompletedFuture.set(null); } }, + // This should always run on the `bulkWriterExecutor`. However, we use `directExecutor()` + // here to ensure that the transform code runs immediately after without yielding. Using + // `bulkWriterExecutor` instead means that the transform is scheduled after other work + // already on the executor, which causes BulkWriter to deadlock. 
MoreExecutors.directExecutor()); pendingOperations.add(operationCompletedFuture); @@ -687,34 +700,40 @@ private ApiFuture enqueueWriteHelper( FirestoreException.class, new ApiAsyncFunction() { public ApiFuture apply(FirestoreException exception) - throws BulkWriterError, ExecutionException, InterruptedException { - BulkWriterError bulkWriterError = - new BulkWriterError( + throws BulkWriterException, ExecutionException, InterruptedException { + BulkWriterException bulkWriterException = + new BulkWriterException( exception.getStatus(), exception.getMessage(), documentReference, operationType, failedAttempts); - boolean shouldRetry = invokeUserErrorCallback(bulkWriterError).get(); + boolean shouldRetry = invokeUserErrorCallback(bulkWriterException).get(); logger.log( Level.INFO, String.format( - "Running error callback on error code: %d, shouldRetry: %b", - exception.getStatus().getCode().value(), shouldRetry)); + "Ran error callback on document: %s, error code: %d, shouldRetry: %b", + documentReference.getPath(), + exception.getStatus().getCode().value(), + shouldRetry)); if (!shouldRetry) { - throw bulkWriterException; } else { return enqueueWriteHelper( documentReference, operationType, operationCallback, failedAttempts + 1); } } }, + // This code should always be on the bulkWriterExecutor. However, we use `directExecutor()` + // here to ensure that the transform code runs immediately after without yielding. Using + // `bulkWriterExecutor` instead means that the transform is scheduled after other work + // already on the executor, which causes BulkWriter to deadlock. MoreExecutors.directExecutor()); } /** Invokes the user error callback on the user callback executor and returns the result. 
*/ - private SettableApiFuture invokeUserErrorCallback(final BulkWriterError error) { + private SettableApiFuture invokeUserErrorCallback(final BulkWriterException error) { final SettableApiFuture callbackResult = SettableApiFuture.create(); errorExecutor.execute( new Runnable() { @@ -802,23 +821,23 @@ public void run() { batchQueueFlushComplete.addListener( new Runnable() { public void run() { - if (retryBatchQueue.size() > 0) { - for (BulkCommitBatch batch : retryBatchQueue) { - batch.markReadyToSend(); - } - sendReadyBatches(retryBatchQueue); + for (BulkCommitBatch batch : retryBatchQueue) { + batch.markReadyToSend(); } - ApiFutures.successfulAsList(pendingOperations) - .addListener( - new Runnable() { - public void run() { - flushComplete.set(null); - } - }, - MoreExecutors.directExecutor()); + sendReadyBatches(retryBatchQueue); } }, MoreExecutors.directExecutor()); + + ApiFutures.successfulAsList(pendingOperations) + .addListener( + new Runnable() { + public void run() { + flushComplete.set(null); + } + }, + MoreExecutors.directExecutor()); + return flushComplete; } @@ -902,7 +921,7 @@ public void addWriteResultListener( *

For example, see the sample code: * BulkWriter bulkWriter = firestore.bulkWriter(); * bulkWriter.addWriteErrorListener( - * (BulkWriterError error) -> { + * (BulkWriterException error) -> { * if (error.getStatus() == Status.UNAVAILABLE * && error.getFailedAttempts() < MAX_RETRY_ATTEMPTS) { * return true; @@ -931,7 +950,7 @@ public void addWriteErrorListener(WriteErrorCallback onError) { *

For example, see the sample code: * BulkWriter bulkWriter = firestore.bulkWriter(); * bulkWriter.addWriteErrorListener( - * (BulkWriterError error) -> { + * (BulkWriterException error) -> { * if (error.getStatus() == Status.UNAVAILABLE * && error.getFailedAttempts() < MAX_RETRY_ATTEMPTS) { * return true; @@ -1003,7 +1022,7 @@ private void sendReadyBatches(final List batchQueue) { if (delayMs == 0) { sendBatch(batch, batchQueue, batchCompletedFuture); } else { - firestoreExecutor.schedule( + bulkWriterExecutor.schedule( new Runnable() { @Override public void run() { diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterException.java similarity index 90% rename from google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java rename to google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterException.java index 3a906be9a0..ef31e2a789 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterError.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriterException.java @@ -20,7 +20,7 @@ import io.grpc.Status; /** The error thrown when a BulkWriter operation fails. */ -class BulkWriterError extends Exception { +final class BulkWriterException extends FirestoreException { /** @return The status code of the error. */ public Status getStatus() { return status; @@ -31,7 +31,7 @@ public String getMessage() { return message; } - /** @return The document reference the operation was performed on. */ + /** @return The DocumentReference the operation was performed on. 
*/ public DocumentReference getDocumentReference() { return documentReference; } @@ -52,12 +52,13 @@ public int getFailedAttempts() { private final OperationType operationType; private final int failedAttempts; - public BulkWriterError( + public BulkWriterException( Status status, String message, DocumentReference documentReference, OperationType operationType, int failedAttempts) { + super(message, status); this.status = status; this.message = message; this.documentReference = documentReference; diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreException.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreException.java index 5fabd5e8bd..10186bee5d 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreException.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreException.java @@ -24,10 +24,10 @@ import javax.annotation.Nullable; /** A Firestore Service exception. 
*/ -public final class FirestoreException extends BaseGrpcServiceException { +public class FirestoreException extends BaseGrpcServiceException { private Status status; - private FirestoreException(String reason, Status status) { + FirestoreException(String reason, Status status) { this(reason, status, null); } diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreImpl.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreImpl.java index ecb58d7974..aa7b2a5100 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreImpl.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/FirestoreImpl.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.concurrent.ScheduledExecutorService; import javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -102,6 +103,11 @@ BulkWriter bulkWriter(BulkWriterOptions options) { return new BulkWriter(this, options); } + @Nonnull + BulkWriter bulkWriter(BulkWriterOptions options, ScheduledExecutorService executor) { + return new BulkWriter(this, options, executor); + } + @Nonnull @Override public CollectionReference collection(@Nonnull String collectionPath) { diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java index 698df7be03..674ed47940 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/UpdateBuilder.java @@ -61,7 +61,11 @@ static class WriteOperation { private final List writes = new ArrayList<>(); - protected boolean committed; + private boolean committed; + + boolean isCommitted() { + return committed; + } UpdateBuilder(FirestoreImpl firestore) { this.firestore = firestore; @@ -154,7 +158,7 @@ private T performCreate( private 
void verifyNotCommitted() { Preconditions.checkState( - !committed, "Cannot modify a WriteBatch that has already been committed."); + !isCommitted(), "Cannot modify a WriteBatch that has already been committed."); } /** @@ -644,7 +648,7 @@ List getWrites() { /** Get the number of writes. */ @VisibleForTesting - int getMutationsSize() { + int getWriteCount() { return writes.size(); } } diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 3b61045af4..2d569cfb40 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -224,8 +224,8 @@ public void surfacesErrors() throws Exception { result.get(); fail("set() should have failed"); } catch (Exception e) { - assertTrue(e.getCause() instanceof BulkWriterError); - assertEquals(Status.DEADLINE_EXCEEDED, ((BulkWriterError) e.getCause()).getStatus()); + assertTrue(e.getCause() instanceof BulkWriterException); + assertEquals(Status.DEADLINE_EXCEEDED, ((BulkWriterException) e.getCause()).getStatus()); } } @@ -420,7 +420,7 @@ public void retriesFailedOperationsWithGlobalErrorCallback() throws Exception { DocumentReference doc4 = firestoreMock.document("coll/doc4"); bulkWriter.addWriteErrorListener( new WriteErrorCallback() { - public boolean onError(BulkWriterError error) { + public boolean onError(BulkWriterException error) { operations.add(error.getOperationType().name()); return true; } @@ -458,21 +458,21 @@ public void errorSurfacedEvenWithRetryFunction() throws Exception { final boolean[] errorListenerCalled = {false}; bulkWriter.addWriteErrorListener( new WriteErrorCallback() { - public boolean onError(BulkWriterError error) { + public boolean onError(BulkWriterException error) { errorListenerCalled[0] = true; assertEquals(Status.INTERNAL, error.getStatus()); 
return false; } }); - ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + ApiFuture result = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); bulkWriter.close(); assertTrue(errorListenerCalled[0]); try { - result1.get(); + result.get(); fail("Operation should have failed in test"); } catch (Exception e) { - assertEquals(Status.INTERNAL, ((BulkWriterError) e.getCause()).getStatus()); + assertEquals(Status.INTERNAL, ((BulkWriterException) e.getCause()).getStatus()); } } @@ -490,16 +490,16 @@ public void surfacesExceptionsThrownByUserProvidedErrorListener() throws Excepti bulkWriter.addWriteErrorListener( new WriteErrorCallback() { - public boolean onError(BulkWriterError error) { + public boolean onError(BulkWriterException error) { throw new UnsupportedOperationException( "Test code threw UnsupportedOperationException"); } }); - ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + ApiFuture result = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); bulkWriter.close(); try { - result1.get(); + result.get(); fail("Operation should have failed in test"); } catch (Exception e) { assertTrue(e.getMessage().contains("Test code threw UnsupportedOperationException")); @@ -526,10 +526,10 @@ public void onResult(DocumentReference documentReference, WriteResult result) { } }); - ApiFuture result1 = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); + ApiFuture result = bulkWriter.set(doc1, LocalFirestoreHelper.SINGLE_FIELD_MAP); bulkWriter.close(); try { - result1.get(); + result.get(); fail("Operation should have failed in test"); } catch (Exception e) { assertTrue(e.getMessage().contains("Test code threw UnsupportedOperationException")); @@ -559,7 +559,7 @@ public void retriesMultipleTimes() throws Exception { bulkWriter.addWriteErrorListener( new WriteErrorCallback() { - public boolean onError(BulkWriterError error) { + public boolean onError(BulkWriterException error) { return 
true; } }); @@ -595,7 +595,7 @@ public void retriesMaintainCorrectWriteResolutionOrdering() throws Exception { final List operations = new ArrayList<>(); bulkWriter.addWriteErrorListener( new WriteErrorCallback() { - public boolean onError(BulkWriterError error) { + public boolean onError(BulkWriterException error) { return true; } }); @@ -633,6 +633,7 @@ public void run() { // Verify that the 2nd operation did not complete as a result of the flush() call. assertArrayEquals(new String[] {"BEFORE_FLUSH", "FLUSH"}, operations.toArray()); bulkWriter.close(); + assertArrayEquals(new String[] {"BEFORE_FLUSH", "FLUSH", "AFTER_FLUSH"}, operations.toArray()); } @Test @@ -658,7 +659,7 @@ public void returnsTheErrorIfNoRetrySpecified() throws Exception { bulkWriter.addWriteErrorListener( new WriteErrorCallback() { - public boolean onError(BulkWriterError error) { + public boolean onError(BulkWriterException error) { return error.getFailedAttempts() < 3; } }); @@ -669,7 +670,7 @@ public boolean onError(BulkWriterError error) { result1.get(); fail("Operation should have failed"); } catch (Exception e) { - assertEquals(Status.INTERNAL, ((BulkWriterError) e.getCause()).getStatus()); + assertEquals(Status.INTERNAL, ((BulkWriterException) e.getCause()).getStatus()); } } @@ -743,8 +744,8 @@ public void retriesIndividualWritesThatFailWithAbortedOrUnavailable() throws Exc result1.get(); fail("set() should have failed"); } catch (Exception e) { - assertTrue(e.getCause() instanceof BulkWriterError); - assertEquals(Status.DEADLINE_EXCEEDED, ((BulkWriterError) e.getCause()).getStatus()); + assertTrue(e.getCause() instanceof BulkWriterException); + assertEquals(Status.DEADLINE_EXCEEDED, ((BulkWriterException) e.getCause()).getStatus()); } assertEquals(Timestamp.ofTimeSecondsAndNanos(2, 0), result2.get().getUpdateTime()); assertEquals(Timestamp.ofTimeSecondsAndNanos(3, 0), result3.get().getUpdateTime()); @@ -848,7 +849,6 @@ public ScheduledFuture schedule(Runnable command, long delay, 
TimeUnit unit) return super.schedule(command, 0, TimeUnit.MILLISECONDS); } }; - doReturn(timeoutExecutor).when(firestoreRpc).getExecutor(); // Stub responses for the BulkWriter batches. ResponseStubber responseStubber = @@ -861,7 +861,8 @@ public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) }; responseStubber.initializeStub(batchWriteCapture, firestoreMock); BulkWriter bulkWriter = - firestoreMock.bulkWriter(BulkWriterOptions.builder().setInitialOpsPerSecond(5).build()); + firestoreMock.bulkWriter( + BulkWriterOptions.builder().setInitialOpsPerSecond(5).build(), timeoutExecutor); for (int i = 0; i < 600; ++i) { bulkWriter.set(firestoreMock.document("coll/doc"), LocalFirestoreHelper.SINGLE_FIELD_MAP); diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterIntegrationTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/ITBulkWriterTest.java similarity index 80% rename from google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterIntegrationTest.java rename to google-cloud-firestore/src/test/java/com/google/cloud/firestore/ITBulkWriterTest.java index 256da12c25..0fb7ba5855 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterIntegrationTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/ITBulkWriterTest.java @@ -24,6 +24,7 @@ import com.google.api.core.ApiFuture; import com.google.cloud.Timestamp; +import com.google.cloud.firestore.BulkWriter.WriteErrorCallback; import com.google.cloud.firestore.BulkWriter.WriteResultCallback; import com.google.common.base.Preconditions; import java.util.ArrayList; @@ -38,7 +39,7 @@ import org.junit.Test; import org.junit.rules.TestName; -public class BulkWriterIntegrationTest { +public class ITBulkWriterTest { private Firestore firestore; private CollectionReference randomColl; private DocumentReference randomDoc; @@ -136,7 +137,7 @@ public void 
bulkWriterUpdateAddsPrecondition() throws Exception { result.get(); fail("Update operation should have thrown exception"); } catch (Exception e) { - assertTrue(e.getMessage().contains("No document to update")); + assertTrue(e.getMessage().matches(".* No (document|entity) to update.*")); } } @@ -172,10 +173,44 @@ public Thread newThread(Runnable r) { new WriteResultCallback() { public void onResult(DocumentReference documentReference, WriteResult result) { operations.add("operation"); + assertTrue(Thread.currentThread().getName().contains("bulkWriterSuccess")); } }); writer.set(randomDoc, Collections.singletonMap("foo", "bar")); writer.flush().get(); assertEquals("operation", operations.get(0)); } + + @Test + public void bulkWriterOnError() throws Exception { + class NamedThreadFactory implements ThreadFactory { + public Thread newThread(Runnable r) { + return new Thread(r, "bulkWriterException"); + } + } + Executor executor = Executors.newSingleThreadExecutor(new NamedThreadFactory()); + final List operations = new ArrayList<>(); + + BulkWriter writer = firestore.bulkWriter(); + writer.addWriteErrorListener( + executor, + new WriteErrorCallback() { + public boolean onError(BulkWriterException error) { + operations.add("operation-error"); + assertTrue(Thread.currentThread().getName().contains("bulkWriterException")); + return false; + } + }); + + writer.addWriteResultListener( + executor, + new WriteResultCallback() { + public void onResult(DocumentReference documentReference, WriteResult result) { + fail("The success listener shouldn't be called"); + } + }); + writer.update(randomDoc, "foo", "bar"); + writer.flush().get(); + assertEquals("operation-error", operations.get(0)); + } } diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java index 7dace68293..7b38d66e2c 100644 --- 
a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/WriteBatchTest.java @@ -95,7 +95,7 @@ public void updateDocument() throws Exception { batch.update(documentReference, updateTime, "foo", "bar"); batch.update(documentReference, LocalFirestoreHelper.SINGLE_FIELD_MAP, updateTime); - assertEquals(4, batch.getMutationsSize()); + assertEquals(4, batch.getWriteCount()); List writeResults = batch.commit().get(); List writes = new ArrayList<>(); @@ -121,7 +121,7 @@ public void updateDocumentWithPOJO() throws Exception { commitCapture.capture(), Matchers.>any()); batch.update(documentReference, "foo", UPDATE_SINGLE_FIELD_OBJECT); - assertEquals(1, batch.getMutationsSize()); + assertEquals(1, batch.getWriteCount()); List writeResults = batch.commit().get(); assertEquals(1, writeResults.size()); @@ -151,7 +151,7 @@ public void setDocument() throws Exception { writes.add(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, Arrays.asList("foo"))); writes.add(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, Arrays.asList("foo"))); - assertEquals(4, batch.getMutationsSize()); + assertEquals(4, batch.getWriteCount()); List writeResults = batch.commit().get(); for (int i = 0; i < writeResults.size(); ++i) { @@ -181,7 +181,7 @@ public void setDocumentWithValue() throws Exception { writes.add(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, Arrays.asList("foo"))); writes.add(set(LocalFirestoreHelper.SINGLE_FIELD_PROTO, Arrays.asList("foo"))); - assertEquals(4, batch.getMutationsSize()); + assertEquals(4, batch.getWriteCount()); List writeResults = batch.commit().get(); for (int i = 0; i < writeResults.size(); ++i) { @@ -204,7 +204,7 @@ public void setDocumentWithFloat() throws Exception { List writes = new ArrayList<>(); writes.add(set(LocalFirestoreHelper.SINGLE_FLOAT_PROTO)); - assertEquals(1, batch.getMutationsSize()); + assertEquals(1, batch.getWriteCount()); List writeResults = 
batch.commit().get(); for (int i = 0; i < writeResults.size(); ++i) { @@ -224,7 +224,7 @@ public void omitWriteResultForDocumentTransforms() throws Exception { batch.set(documentReference, map("time", FieldValue.serverTimestamp())); - assertEquals(1, batch.getMutationsSize()); + assertEquals(1, batch.getWriteCount()); List writeResults = batch.commit().get(); assertEquals(1, writeResults.size()); @@ -241,7 +241,7 @@ public void createDocument() throws Exception { .create(documentReference, LocalFirestoreHelper.SINGLE_FIELD_MAP) .create(documentReference, LocalFirestoreHelper.SINGLE_FIELD_OBJECT); - assertEquals(2, batch.getMutationsSize()); + assertEquals(2, batch.getWriteCount()); List writeResults = batch.commit().get(); List writes = new ArrayList<>(); @@ -266,7 +266,7 @@ public void createDocumentWithValue() throws Exception { .create(documentReference, LocalFirestoreHelper.SINGLE_FIELD_PROTO) .create(documentReference, LocalFirestoreHelper.SINGLE_FIELD_OBJECT); - assertEquals(2, batch.getMutationsSize()); + assertEquals(2, batch.getWriteCount()); List writeResults = batch.commit().get(); List writes = new ArrayList<>(); @@ -289,7 +289,7 @@ public void createDocumentWithFloat() throws Exception { batch.create(documentReference, LocalFirestoreHelper.SINGLE_FLOAT_MAP); - assertEquals(1, batch.getMutationsSize()); + assertEquals(1, batch.getWriteCount()); List writeResults = batch.commit().get(); List writes = new ArrayList<>(); @@ -320,7 +320,7 @@ public void deleteDocument() throws Exception { precondition.getUpdateTimeBuilder().setSeconds(1).setNanos(2); writes.add(delete(precondition.build())); - assertEquals(2, batch.getMutationsSize()); + assertEquals(2, batch.getWriteCount()); List writeResults = batch.commit().get(); From b984f5622c129905a611538e3b8190ad2426951b Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 11 Dec 2020 16:30:20 -0800 Subject: [PATCH 16/22] remove userCallbackExecutor and add clirr exception --- .../clirr-ignored-differences.xml | 2 
+- .../google/cloud/firestore/BulkWriter.java | 69 +++++++++++-------- .../cloud/firestore/BulkWriterTest.java | 3 + 3 files changed, 43 insertions(+), 31 deletions(-) diff --git a/google-cloud-firestore/clirr-ignored-differences.xml b/google-cloud-firestore/clirr-ignored-differences.xml index d3cc7093e6..2fdfbe7d91 100644 --- a/google-cloud-firestore/clirr-ignored-differences.xml +++ b/google-cloud-firestore/clirr-ignored-differences.xml @@ -242,7 +242,7 @@ writes - 7009 + 7002 com/google/cloud/firestore/UpdateBuilder int getMutationsSize() diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index d9073f9d5d..b9da7954c4 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -26,7 +26,6 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; -import io.grpc.Context; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -168,8 +167,8 @@ public boolean onError(BulkWriterException error) { } }; - private Executor successExecutor; - private Executor errorExecutor; + private @Nullable Executor successExecutor; + private @Nullable Executor errorExecutor; private final FirestoreImpl firestore; @@ -188,10 +187,6 @@ public boolean onError(BulkWriterException error) { this.firestore = firestore; this.bulkWriterExecutor = bulkWriterExecutor; - Executor userCallbackExecutor = Executors.newSingleThreadExecutor(); - this.successExecutor = Context.currentContextExecutor(userCallbackExecutor); - this.errorExecutor = Context.currentContextExecutor(userCallbackExecutor); - if (!options.getThrottlingEnabled()) { this.rateLimiter = new RateLimiter( @@ -735,18 +730,27 @@ public ApiFuture apply(FirestoreException exception) /** 
Invokes the user error callback on the user callback executor and returns the result. */ private SettableApiFuture invokeUserErrorCallback(final BulkWriterException error) { final SettableApiFuture callbackResult = SettableApiFuture.create(); - errorExecutor.execute( - new Runnable() { - @Override - public void run() { - try { - boolean shouldRetry = errorListener.onError(error); - callbackResult.set(shouldRetry); - } catch (Exception e) { - callbackResult.setException(e); + if (errorExecutor != null) { + errorExecutor.execute( + new Runnable() { + @Override + public void run() { + try { + boolean shouldRetry = errorListener.onError(error); + callbackResult.set(shouldRetry); + } catch (Exception e) { + callbackResult.setException(e); + } } - } - }); + }); + } else { + try { + boolean shouldRetry = errorListener.onError(error); + callbackResult.set(shouldRetry); + } catch (Exception e) { + callbackResult.setException(e); + } + } return callbackResult; } @@ -754,19 +758,24 @@ public void run() { private ApiFuture invokeUserSuccessCallback( final DocumentReference documentReference, final WriteResult result) { final SettableApiFuture callbackResult = SettableApiFuture.create(); - successExecutor.execute( - new Runnable() { - @Override - public void run() { - try { - successListener.onResult(documentReference, result); - callbackResult.set(null); - } catch (Exception e) { - callbackResult.setException(e); + if (successExecutor != null) { + successExecutor.execute( + new Runnable() { + @Override + public void run() { + try { + successListener.onResult(documentReference, result); + callbackResult.set(null); + } catch (Exception e) { + callbackResult.setException(e); + } } - } - }); - return callbackResult; + }); + return callbackResult; + } else { + successListener.onResult(documentReference, result); + return ApiFutures.immediateFuture(null); + } } /** diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java 
b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 2d569cfb40..43661e11a9 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -44,6 +44,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -418,7 +419,9 @@ public void retriesFailedOperationsWithGlobalErrorCallback() throws Exception { final List operations = new ArrayList<>(); DocumentReference doc3 = firestoreMock.document("coll/doc3"); DocumentReference doc4 = firestoreMock.document("coll/doc4"); + Executor userCallbackExecutor = Executors.newSingleThreadExecutor(); bulkWriter.addWriteErrorListener( + userCallbackExecutor, new WriteErrorCallback() { public boolean onError(BulkWriterException error) { operations.add(error.getOperationType().name()); From 59c89bcfe3d13abd4df7cb6c7526afcb6d69e33e Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 11 Dec 2020 16:40:46 -0800 Subject: [PATCH 17/22] fix flaky test --- .../test/java/com/google/cloud/firestore/BulkWriterTest.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 43661e11a9..14d193feeb 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -594,6 +594,7 @@ public void retriesMaintainCorrectWriteResolutionOrdering() throws Exception { // Use separate futures to track listener completion since the callbacks are run on a different // thread than the BulkWriter 
operations. final SettableApiFuture flushComplete = SettableApiFuture.create(); + final SettableApiFuture closeComplete = SettableApiFuture.create(); final List operations = new ArrayList<>(); bulkWriter.addWriteErrorListener( @@ -628,6 +629,7 @@ public void run() { new Runnable() { public void run() { operations.add("AFTER_FLUSH"); + closeComplete.set(null); } }, executor); @@ -636,6 +638,7 @@ public void run() { // Verify that the 2nd operation did not complete as a result of the flush() call. assertArrayEquals(new String[] {"BEFORE_FLUSH", "FLUSH"}, operations.toArray()); bulkWriter.close(); + closeComplete.get(); assertArrayEquals(new String[] {"BEFORE_FLUSH", "FLUSH", "AFTER_FLUSH"}, operations.toArray()); } From c46a296a42a0e29fe04e6a95dd1e98a0af8a78ea Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 14 Dec 2020 09:59:18 -0800 Subject: [PATCH 18/22] fix flaking batch limiter test --- .../java/com/google/cloud/firestore/BulkWriterTest.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java index 14d193feeb..f715090971 100644 --- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java +++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/BulkWriterTest.java @@ -842,7 +842,9 @@ public void flushCompletesWhenAllWritesComplete() throws Exception { @Test public void doesNotSendBatchesIfDoingSoExceedsRateLimit() throws Exception { - final boolean[] timeoutCalled = {false}; + // This future is completed when the BulkWriter schedules a timeout. This test waits on the + // future at the end of the test to ensure that the timeout was called. 
+ final SettableApiFuture timeoutCalledFuture = SettableApiFuture.create(); final ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(1) { @@ -850,7 +852,7 @@ public void doesNotSendBatchesIfDoingSoExceedsRateLimit() throws Exception { @Nonnull public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { if (delay > 0) { - timeoutCalled[0] = true; + timeoutCalledFuture.set(null); } return super.schedule(command, 0, TimeUnit.MILLISECONDS); } @@ -874,8 +876,7 @@ public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) bulkWriter.set(firestoreMock.document("coll/doc"), LocalFirestoreHelper.SINGLE_FIELD_MAP); } bulkWriter.flush(); - - assertTrue(timeoutCalled[0]); + timeoutCalledFuture.get(); } @Test From 044f925726913a30ae6e3953679e0c952c20bd54 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 14 Dec 2020 13:47:34 -0800 Subject: [PATCH 19/22] use directExecutor() in success/error executors by default instead of wasting logic --- .../google/cloud/firestore/BulkWriter.java | 66 ++++++++----------- 1 file changed, 27 insertions(+), 39 deletions(-) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index b9da7954c4..f655691d9a 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -167,8 +167,8 @@ public boolean onError(BulkWriterException error) { } }; - private @Nullable Executor successExecutor; - private @Nullable Executor errorExecutor; + private Executor successExecutor; + private Executor errorExecutor; private final FirestoreImpl firestore; @@ -186,6 +186,8 @@ public boolean onError(BulkWriterException error) { ScheduledExecutorService bulkWriterExecutor) { this.firestore = firestore; this.bulkWriterExecutor = bulkWriterExecutor; + this.successExecutor = 
MoreExecutors.directExecutor(); + this.errorExecutor = MoreExecutors.directExecutor(); if (!options.getThrottlingEnabled()) { this.rateLimiter = @@ -730,27 +732,18 @@ public ApiFuture apply(FirestoreException exception) /** Invokes the user error callback on the user callback executor and returns the result. */ private SettableApiFuture invokeUserErrorCallback(final BulkWriterException error) { final SettableApiFuture callbackResult = SettableApiFuture.create(); - if (errorExecutor != null) { - errorExecutor.execute( - new Runnable() { - @Override - public void run() { - try { - boolean shouldRetry = errorListener.onError(error); - callbackResult.set(shouldRetry); - } catch (Exception e) { - callbackResult.setException(e); - } + errorExecutor.execute( + new Runnable() { + @Override + public void run() { + try { + boolean shouldRetry = errorListener.onError(error); + callbackResult.set(shouldRetry); + } catch (Exception e) { + callbackResult.setException(e); } - }); - } else { - try { - boolean shouldRetry = errorListener.onError(error); - callbackResult.set(shouldRetry); - } catch (Exception e) { - callbackResult.setException(e); - } - } + } + }); return callbackResult; } @@ -758,24 +751,19 @@ public void run() { private ApiFuture invokeUserSuccessCallback( final DocumentReference documentReference, final WriteResult result) { final SettableApiFuture callbackResult = SettableApiFuture.create(); - if (successExecutor != null) { - successExecutor.execute( - new Runnable() { - @Override - public void run() { - try { - successListener.onResult(documentReference, result); - callbackResult.set(null); - } catch (Exception e) { - callbackResult.setException(e); - } + successExecutor.execute( + new Runnable() { + @Override + public void run() { + try { + successListener.onResult(documentReference, result); + callbackResult.set(null); + } catch (Exception e) { + callbackResult.setException(e); } - }); - return callbackResult; - } else { - 
successListener.onResult(documentReference, result); - return ApiFutures.immediateFuture(null); - } + } + }); + return callbackResult; } /** From 8dc465d414828570af09488511d2f105e6b9ddba Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 14 Dec 2020 15:04:34 -0800 Subject: [PATCH 20/22] update comment since we're not using synchronized lists anymore --- .../java/com/google/cloud/firestore/BulkWriter.java | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java index f655691d9a..7bca559ea3 100644 --- a/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java +++ b/google-cloud-firestore/src/main/java/com/google/cloud/firestore/BulkWriter.java @@ -112,18 +112,10 @@ enum OperationType { /** The maximum number of writes that can be in a single batch. */ private int maxBatchSize = MAX_BATCH_SIZE; - /** - * A queue of batches to be written. Use a synchronized list to avoid multi-thread concurrent - * modification errors (as this list is modified from both the user thread and the network - * thread). - */ + /** A queue of batches to be written. */ private final List batchQueue = new ArrayList<>(); - /** - * A queue of batches to be retried. Use a synchronized list to avoid multi-thread concurrent - * modification errors (as this list is modified from both the user thread and the network - * thread). - */ + /** A queue of batches containing operations that need to be retried. 
*/ private final List retryBatchQueue = new ArrayList<>(); /** From bf039040adab10d4f02d25bb563227f95aebb8e2 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 17 Dec 2020 15:27:47 -0800 Subject: [PATCH 21/22] resolve ben comments, add coverage --- .../clirr-ignored-differences.xml | 5 - .../google/cloud/firestore/BulkWriter.java | 77 ++++++++---- .../cloud/firestore/BulkWriterException.java | 40 +++---- .../google/cloud/firestore/UpdateBuilder.java | 4 +- .../cloud/firestore/BulkWriterTest.java | 112 +++++++++++++++--- .../cloud/firestore/WriteBatchTest.java | 20 ++-- 6 files changed, 179 insertions(+), 79 deletions(-) diff --git a/google-cloud-firestore/clirr-ignored-differences.xml b/google-cloud-firestore/clirr-ignored-differences.xml index 2fdfbe7d91..4a4d5d4c0f 100644 --- a/google-cloud-firestore/clirr-ignored-differences.xml +++ b/google-cloud-firestore/clirr-ignored-differences.xml @@ -241,11 +241,6 @@ com/google/cloud/firestore/UpdateBuilder writes - - 7002 - com/google/cloud/firestore/UpdateBuilder - int getMutationsSize() -