From c9f7193d03f7cccc65d22e61a9976f1f5aeaef1e Mon Sep 17 00:00:00 2001
From: Sanskar Jhajharia
Date: Tue, 21 May 2024 22:18:17 +0530
Subject: [PATCH 1/2] Code cleanup

---
 .../apache/kafka/clients/NodeApiVersions.java | 7 +-
 .../apache/kafka/clients/admin/NewTopic.java | 14 +-
 .../consumer/internals/MemberState.java | 7 +-
 .../producer/internals/RecordAccumulator.java | 13 +-
 .../requests/DescribeLogDirsResponse.java | 28 +-
 .../kafka/common/requests/FetchRequest.java | 26 +-
 .../common/requests/LeaderAndIsrRequest.java | 18 +-
 .../kafka/common/requests/ProduceRequest.java | 14 +-
 .../common/requests/StopReplicaRequest.java | 16 +-
 .../requests/UpdateMetadataRequest.java | 20 +-
 ...shingHttpsJwksVerificationKeyResolver.java | 18 +-
 .../unsecured/OAuthBearerValidationUtils.java | 2 +-
 .../security/ssl/DefaultSslEngineFactory.java | 5 +-
 .../security/ssl/SslPrincipalMapper.java | 2 +-
 .../kafka/common/utils/PureJavaCrc32C.java | 4 +-
 .../org/apache/kafka/common/utils/Utils.java | 1 +
 .../apache/kafka/clients/ClientUtilsTest.java | 2 +-
 .../apache/kafka/clients/MetadataTest.java | 10 +-
 .../kafka/clients/admin/ConfigTest.java | 5 +-
 ...escribeUserScramCredentialsResultTest.java | 13 +-
 .../clients/admin/KafkaAdminClientTest.java | 165 +++---
 .../kafka/clients/admin/MockAdminClient.java | 2 +-
 .../internals/CoordinatorStrategyTest.java | 2 +-
 .../internals/CommitRequestManagerTest.java | 29 +-
 .../internals/ConsumerCoordinatorTest.java | 26 +-
 .../internals/ConsumerInterceptorsTest.java | 2 +-
 .../clients/producer/KafkaProducerTest.java | 2 +-
 .../internals/RecordAccumulatorTest.java | 14 +-
 .../internals/TransactionManagerTest.java | 4 +-
 .../kafka/common/config/ConfigDefTest.java | 26 +-
 .../provider/DirectoryConfigProviderTest.java | 4 +-
 .../kafka/common/metrics/JmxReporterTest.java | 6 +-
 .../kafka/common/metrics/MetricsTest.java | 10 +-
 .../kafka/common/metrics/SensorTest.java | 3 +-
 .../common/metrics/stats/FrequenciesTest.java | 3 +-
 .../kafka/common/network/SelectorTest.java | 2 +-
 .../common/network/SslTransportLayerTest.java | 9 +-
 .../network/SslTransportTls12Tls13Test.java | 2 +-
 .../common/network/Tls12SelectorTest.java | 4 +-
 .../common/network/Tls13SelectorTest.java | 3 +-
 .../common/protocol/MessageUtilTest.java | 2 +-
 .../record/MemoryRecordsBuilderTest.java | 2 +-
 .../AlterReplicaLogDirsRequestTest.java | 6 +-
 .../AlterReplicaLogDirsResponseTest.java | 3 +-
 .../requests/DeleteAclsResponseTest.java | 2 +-
 .../requests/LeaderAndIsrRequestTest.java | 4 +-
 .../requests/ListOffsetsRequestTest.java | 20 +-
 .../common/requests/RequestResponseTest.java | 32 +-
 .../requests/UpdateMetadataRequestTest.java | 14 +-
 .../ClientAuthenticationFailureTest.java | 5 +-
 .../SaslAuthenticatorFailureDelayTest.java | 5 +-
 .../authenticator/SaslAuthenticatorTest.java | 147 +++--
 .../security/kerberos/KerberosNameTest.java | 25 +-
 ...arerUnsecuredLoginCallbackHandlerTest.java | 2 +-
 ...UnsecuredValidatorCallbackHandlerTest.java | 3 +-
 .../OAuthBearerValidationUtilsTest.java | 6 +-
 .../common/utils/FlattenedIteratorTest.java | 25 +-
 .../kafka/connect/data/SchemaBuilder.java | 2 +-
 .../org/apache/kafka/connect/data/Values.java | 4 +-
 .../ConnectorReconfigurationTest.java | 2 +-
 .../connect/data/SchemaProjectorTest.java | 2 +-
 .../connect/util/ConnectorUtilsTest.java | 24 +-
 .../connect/file/FileStreamSinkTaskTest.java | 5 +-
 ...eStreamSourceConnectorIntegrationTest.java | 4 +-
 .../connect/mirror/rest/MirrorRestServer.java | 3 +-
 .../clients/admin/FakeLocalMetadataStore.java | 28 +-
 ...hCustomForwardingAdminIntegrationTest.java | 6 +-
 .../connect/runtime/ConnectorConfig.java | 2 +-
 .../runtime/rest/ConnectRestServer.java | 3 +-
 .../runtime/rest/entities/ConfigInfos.java | 20 +-
 .../runtime/rest/entities/ConfigKeyInfo.java | 48 +-
 .../ConnectorTopicsIntegrationTest.java | 4 +-
 .../kafka/connect/integration/TaskHandle.java | 2 +-
 .../runtime/ErrorHandlingTaskTest.java | 6 +-
 .../connect/runtime/MockConnectMetrics.java | 2 +-
 .../connect/runtime/WorkerSinkTaskTest.java | 18 +-
 .../kafka/connect/runtime/WorkerTest.java | 6 +-
 .../ConnectProtocolCompatibilityTest.java | 16 +-
 .../distributed/DistributedHerderTest.java | 80 +--
 .../IncrementalCooperativeAssignorTest.java | 6 +-
 .../WorkerCoordinatorIncrementalTest.java | 24 +-
 .../runtime/rest/ConnectRestServerTest.java | 2 +-
 .../runtime/rest/RestServerConfigTest.java | 3 +-
 .../runtime/rest/entities/PluginInfoTest.java | 12 +-
 .../storage/FileOffsetBackingStoreTest.java | 10 +-
 .../KafkaConfigBackingStoreMockitoTest.java | 8 +-
 .../storage/KafkaConfigBackingStoreTest.java | 8 +-
 .../storage/KafkaOffsetBackingStoreTest.java | 2 +-
 .../util/ConvertingFutureCallbackTest.java | 6 +-
 .../apache/kafka/connect/util/TestFuture.java | 2 +-
 ...ribeTopicPartitionsRequestHandlerTest.java | 49 +-
 .../logger/RuntimeLoggerManagerTest.java | 44 +-
 .../BootstrapControllersIntegrationTest.java | 15 +-
 .../kafka/testkit/KafkaClusterTestKit.java | 8 +-
 .../src/main/java/kafka/examples/Utils.java | 2 +-
 .../group/GroupMetadataManagerTest.java | 40 +-
 .../group/MetadataImageBuilder.java | 2 +-
 .../group/OffsetMetadataManagerTest.java | 44 +-
 .../consumer/ConsumerGroupMemberTest.java | 14 +-
 .../group/runtime/CoordinatorRuntimeTest.java | 20 +-
 .../runtime/InMemoryPartitionWriter.java | 6 +-
 .../log4jappender/KafkaLog4jAppenderTest.java | 2 +-
 .../controller/ClusterControlManager.java | 2 +-
 .../controller/PartitionChangeBuilder.java | 5 +-
 .../kafka/controller/QuorumController.java | 3 +-
 .../controller/ReplicationControlManager.java | 4 +-
 .../errors/ControllerExceptions.java | 6 +-
 .../errors/EventHandlerExceptionInfo.java | 3 +-
 .../org/apache/kafka/image/AclsDelta.java | 2 +-
 .../kafka/metadata/BrokerRegistration.java | 38 +-
 .../metadata/ControllerRegistration.java | 28 +-
 .../metadata/FinalizedControllerFeatures.java | 10 +-
 .../kafka/metadata/PartitionRegistration.java | 26 +-
 .../controller/AclControlManagerTest.java | 6 +-
 .../ClientQuotaControlManagerTest.java | 36 +-
 .../controller/ClusterControlManagerTest.java | 28 +-
 .../ConfigurationControlManagerTest.java | 27 +-
 .../controller/FeatureControlManagerTest.java | 14 +-
 .../controller/OffsetControlManagerTest.java | 16 +-
 .../PartitionChangeBuilderTest.java | 40 +-
 .../PartitionReassignmentReplicasTest.java | 2 +-
 .../PartitionReassignmentRevertTest.java | 7 +-
 .../QuorumControllerIntegrationTestUtils.java | 11 +-
 .../controller/QuorumControllerTest.java | 134 ++--
 .../ReplicationControlManagerTest.java | 548 +++++++++---------
 .../kafka/image/ClientQuotasImageTest.java | 8 +-
 .../apache/kafka/image/ClusterImageTest.java | 21 +-
 .../kafka/image/ImageDowngradeTest.java | 10 +-
 .../apache/kafka/image/TopicsImageTest.java | 8 +-
 .../image/loader/MetadataLoaderTest.java | 124 ++--
 .../node/ClusterImageBrokersNodeTest.java | 2 +-
 .../node/ClusterImageControllersNodeTest.java | 3 +-
 .../publisher/SnapshotGeneratorTest.java | 5 +-
 .../image/writer/RaftSnapshotWriterTest.java | 3 +-
 .../metadata/BrokerRegistrationTest.java | 14 +-
 .../metadata/DelegationTokenDataTest.java | 3 +-
 .../kafka/metadata/KafkaConfigSchemaTest.java | 7 +-
 .../kafka/metadata/ListenerInfoTest.java | 25 +-
 .../metadata/PartitionRegistrationTest.java | 4 +-
 .../kafka/metadata/RecordTestUtils.java | 12 +-
 .../apache/kafka/metadata/ReplicasTest.java | 8 +-
 .../StandardAclRecordIteratorTest.java | 5 +-
 .../bootstrap/BootstrapMetadataTest.java | 6 +-
 .../migration/KRaftMigrationDriverTest.java | 6 +-
 .../placement/StripedReplicaPlacerTest.java | 6 +-
 .../placement/TopicAssignmentTest.java | 21 +-
 .../MetaPropertiesEnsembleTest.java | 4 +-
 .../apache/kafka/metalog/LocalLogManager.java | 3 +-
 .../org/apache/kafka/raft/ElectionState.java | 18 +-
 .../kafka/raft/internals/LogHistory.java | 6 +-
 .../kafka/raft/internals/ReplicaKey.java | 4 +-
 .../apache/kafka/raft/internals/VoterSet.java | 8 +-
 .../raft/KafkaRaftClientSnapshotTest.java | 25 +-
 .../kafka/raft/KafkaRaftClientTest.java | 20 +-
 .../kafka/raft/RaftClientTestContext.java | 2 +-
 .../raft/internals/BatchBuilderTest.java | 5 +-
 .../kafka/raft/internals/VoterSetTest.java | 2 +-
 .../config/ServerTopicConfigSynonyms.java | 13 +-
 .../kafka/server/mutable/BoundedListTest.java | 7 +-
 .../network/EndpointReadyFuturesTest.java | 3 +-
 .../server/util/CommandLineUtilsTest.java | 7 +-
 .../kafka/timeline/TimelineHashMapTest.java | 3 +-
 .../kafka/timeline/TimelineHashSetTest.java | 5 +-
 .../shell/command/CatCommandHandler.java | 3 +-
 .../kafka/shell/command/CdCommandHandler.java | 3 +-
 .../command/ErroneousCommandHandler.java | 3 +-
 .../shell/command/ExitCommandHandler.java | 3 +-
 .../shell/command/FindCommandHandler.java | 3 +-
 .../shell/command/HelpCommandHandler.java | 3 +-
 .../kafka/shell/command/LsCommandHandler.java | 6 +-
 .../shell/command/ManCommandHandler.java | 3 +-
 .../shell/command/NoOpCommandHandler.java | 3 +-
 .../shell/command/PwdCommandHandler.java | 3 +-
 .../shell/command/TreeCommandHandler.java | 3 +-
 .../apache/kafka/shell/glob/GlobVisitor.java | 3 +-
 .../kafka/shell/command/CommandTest.java | 18 +-
 .../kafka/shell/glob/GlobVisitorTest.java | 15 +-
 .../storage/LocalTieredStorageEvent.java | 5 +-
 .../storage/LocalTieredStorageTest.java | 4 +-
 .../storage/RemoteLogMetadataManagerTest.java | 2 +-
 .../storage/utils/RecordsKeyValueMatcher.java | 11 +-
 .../internals/InternalTopicConfig.java | 3 +-
 .../streams/processor/internals/SinkNode.java | 8 +-
 .../internals/StreamsMetadataState.java | 10 +-
 .../apache/kafka/streams/state/HostInfo.java | 2 +-
 .../EosV2UpgradeIntegrationTest.java | 2 +-
 .../integration/IQv2IntegrationTest.java | 2 +-
 ...nGracePeriodDurabilityIntegrationTest.java | 7 +-
 .../KTableEfficientRangeQueryTest.java | 10 +-
 ...rJoinCustomPartitionerIntegrationTest.java | 5 +-
 .../RegexSourceIntegrationTest.java | 4 +-
 .../SelfJoinUpgradeIntegrationTest.java | 16 +-
 .../StreamStreamJoinIntegrationTest.java | 260 ++++-----
 ...caughtExceptionHandlerIntegrationTest.java | 10 +-
 .../TaskMetadataIntegrationTest.java | 4 +-
 .../kafka/streams/kstream/PrintedTest.java | 2 +-
 .../kstream/internals/KStreamImplTest.java | 10 +-
 .../kstream/internals/KStreamSplitTest.java | 11 +-
 .../internals/UnlimitedWindowTest.java | 2 +-
 .../WindowedStreamPartitionerTest.java | 10 +-
 .../internals/DefaultStateUpdaterTest.java | 2 +-
 .../internals/InternalTopicManagerTest.java | 9 +-
 .../internals/MockChangelogReader.java | 2 +-
 .../internals/ProcessorStateManagerTest.java | 2 +-
 .../internals/StateManagerUtilTest.java | 2 +-
 .../assignment/AssignmentTestUtils.java | 68 +--
 .../TaskAssignorConvergenceTest.java | 18 +-
 ...lSchemaRocksDBSegmentedBytesStoreTest.java | 24 +-
 .../AbstractSessionBytesStoreTest.java | 4 +-
 ...eOrderedWindowSegmentedBytesStoreTest.java | 4 +-
 .../internals/SessionStoreFetchTest.java | 12 +-
 .../state/internals/WindowStoreFetchTest.java | 12 +-
 .../metrics/RocksDBBlockCacheMetricsTest.java | 8 +-
 .../kafka/streams/tests/EosTestDriver.java | 6 +-
 .../streams/tests/StaticMemberTestClient.java | 10 +-
 .../StreamsBrokerDownResilienceTest.java | 4 +-
 .../tests/StreamsNamedRepartitionTest.java | 4 +-
 .../streams/tests/StreamsOptimizedTest.java | 10 +-
 .../tests/StreamsStandByReplicaTest.java | 10 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 6 +-
 .../apache/kafka/test/MockClientSupplier.java | 2 +-
 .../test/MockInternalNewProcessorContext.java | 2 +-
 .../test/MockInternalProcessorContext.java | 2 +-
 .../kafka/test/MockRestoreConsumer.java | 2 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 4 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 4 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 4 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 4 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 8 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 8 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 8 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 8 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 8 +-
 ...eamsUpgradeToCooperativeRebalanceTest.java | 2 +-
 .../kafka/tools/ConsumerPerformance.java | 4 +-
 .../kafka/tools/LeaderElectionCommand.java | 18 +-
 .../org/apache/kafka/tools/ToolsUtils.java | 4 +-
 .../org/apache/kafka/tools/TopicCommand.java | 10 +-
 .../consumer/group/ConsumerGroupCommand.java | 2 +-
 .../kafka/tools/GetOffsetShellTest.java | 3 +-
 .../tools/TopicCommandIntegrationTest.java | 2 +-
 .../apache/kafka/tools/TopicCommandTest.java | 6 +-
 .../group/ConsumerGroupCommandTest.java | 3 +-
 .../ReassignPartitionsIntegrationTest.java | 2 +-
 .../reassign/ReassignPartitionsUnitTest.java | 32 +-
 .../kafka/trogdor/rest/TasksRequest.java | 6 +-
 .../trogdor/common/StringExpanderTest.java | 4 +-
 247 files changed, 1830 insertions(+), 1915 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java
index 838718652f37c..12a7d019921b5 100644
--- a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java
+++ b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java
@@ -174,10 +174,9 @@ public String toString(boolean lineBreaks) {
         // which may happen when the remote is too old.
         for (ApiKeys apiKey : ApiKeys.clientApis()) {
             if (!apiKeysText.containsKey(apiKey.id)) {
-                StringBuilder bld = new StringBuilder();
-                bld.append(apiKey.name).append("(").
-                    append(apiKey.id).append("): ").append("UNSUPPORTED");
-                apiKeysText.put(apiKey.id, bld.toString());
+                String bld = apiKey.name + "(" +
+                        apiKey.id + "): " + "UNSUPPORTED";
+                apiKeysText.put(apiKey.id, bld);
             }
         }
         String separator = lineBreaks ? ",\n\t" : ", ";

diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java
index 2f335d02f2f2b..0151e6f61793f 100644
--- a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java
+++ b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java
@@ -147,14 +147,12 @@ CreatableTopic convertToCreatableTopic() {
 
     @Override
     public String toString() {
-        StringBuilder bld = new StringBuilder();
-        bld.append("(name=").append(name).
- append(", numPartitions=").append(numPartitions.map(String::valueOf).orElse("default")). - append(", replicationFactor=").append(replicationFactor.map(String::valueOf).orElse("default")). - append(", replicasAssignments=").append(replicasAssignments). - append(", configs=").append(configs). - append(")"); - return bld.toString(); + return "(name=" + name + + ", numPartitions=" + numPartitions.map(String::valueOf).orElse("default") + + ", replicationFactor=" + replicationFactor.map(String::valueOf).orElse("default") + + ", replicasAssignments=" + replicasAssignments + + ", configs=" + configs + + ")"; } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java index 9f0c7d947ea7e..a62c634ba437e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberState.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; public enum MemberState { @@ -120,7 +121,7 @@ public enum MemberState { RECONCILING.previousValidStates = Arrays.asList(STABLE, JOINING, ACKNOWLEDGING, RECONCILING); - ACKNOWLEDGING.previousValidStates = Arrays.asList(RECONCILING); + ACKNOWLEDGING.previousValidStates = Collections.singletonList(RECONCILING); FATAL.previousValidStates = Arrays.asList(JOINING, STABLE, RECONCILING, ACKNOWLEDGING, PREPARE_LEAVING, LEAVING, UNSUBSCRIBED); @@ -133,11 +134,11 @@ public enum MemberState { PREPARE_LEAVING.previousValidStates = Arrays.asList(JOINING, STABLE, RECONCILING, ACKNOWLEDGING, UNSUBSCRIBED); - LEAVING.previousValidStates = Arrays.asList(PREPARE_LEAVING); + LEAVING.previousValidStates = Collections.singletonList(PREPARE_LEAVING); UNSUBSCRIBED.previousValidStates = Arrays.asList(PREPARE_LEAVING, LEAVING, FENCED); - STALE.previousValidStates = Arrays.asList(LEAVING); + STALE.previousValidStates = Collections.singletonList(LEAVING); } private List previousValidStates; diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java index 013ad32dc7df3..0817927e1c545 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java @@ -855,13 +855,12 @@ private boolean shouldStopDrainBatchesForPartition(ProducerBatch first, TopicPar } int firstInFlightSequence = transactionManager.firstInFlightSequence(first.topicPartition); - if (firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence() - && first.baseSequence() != firstInFlightSequence) - // If the queued batch already has an assigned sequence, then it is being retried. - // In this case, we wait until the next immediate batch is ready and drain that. - // We only move on when the next in line batch is complete (either successfully or due to - // a fatal broker error). This effectively reduces our in flight request count to 1. - return true; + // If the queued batch already has an assigned sequence, then it is being retried. + // In this case, we wait until the next immediate batch is ready and drain that. + // We only move on when the next in line batch is complete (either successfully or due to + // a fatal broker error). 
This effectively reduces our in flight request count to 1. + return firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence() + && first.baseSequence() != firstInFlightSequence; } return false; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java index cbf3054217363..3d06cff712c18 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java @@ -93,13 +93,11 @@ public LogDirInfo(Errors error, Map replicaInfos) { @Override public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("(error=") - .append(error) - .append(", replicas=") - .append(replicaInfos) - .append(")"); - return builder.toString(); + return "(error=" + + error + + ", replicas=" + + replicaInfos + + ")"; } } @@ -126,15 +124,13 @@ public ReplicaInfo(long size, long offsetLag, boolean isFuture) { @Override public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("(size=") - .append(size) - .append(", offsetLag=") - .append(offsetLag) - .append(", isFuture=") - .append(isFuture) - .append(")"); - return builder.toString(); + return "(size=" + + size + + ", offsetLag=" + + offsetLag + + ", isFuture=" + + isFuture + + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java index 2065a15d94259..1082200ec393c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java @@ -316,20 +316,18 @@ public FetchRequest build(short version) { @Override public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("(type=FetchRequest"). - append(", replicaId=").append(replicaId). - append(", maxWait=").append(maxWait). - append(", minBytes=").append(minBytes). - append(", maxBytes=").append(maxBytes). - append(", fetchData=").append(toFetch). - append(", isolationLevel=").append(isolationLevel). - append(", removed=").append(removed.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", "))). - append(", replaced=").append(replaced.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", "))). - append(", metadata=").append(metadata). - append(", rackId=").append(rackId). 
- append(")"); - return bld.toString(); + return "(type=FetchRequest" + + ", replicaId=" + replicaId + + ", maxWait=" + maxWait + + ", minBytes=" + minBytes + + ", maxBytes=" + maxBytes + + ", fetchData=" + toFetch + + ", isolationLevel=" + isolationLevel + + ", removed=" + removed.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", ")) + + ", replaced=" + replaced.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", ")) + + ", metadata=" + metadata + + ", rackId=" + rackId + + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java index 8caddb0054169..9fc83cfd847a8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java @@ -112,16 +112,14 @@ private static Map groupByTopic(List d.partitionData().stream()).collect(Collectors.toList())) - .append("), transactionalId='").append(data.transactionalId() != null ? data.transactionalId() : "") - .append("'"); - return bld.toString(); + return "(type=ProduceRequest" + + ", acks=" + data.acks() + + ", timeout=" + data.timeoutMs() + + ", partitionRecords=(" + data.topicData().stream().flatMap(d -> d.partitionData().stream()).collect(Collectors.toList()) + + "), transactionalId='" + (data.transactionalId() != null ? data.transactionalId() : "") + + "'"; } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java index 940a16f0a8589..6245cb27c6c47 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java @@ -94,15 +94,13 @@ public StopReplicaRequest build(short version) { @Override public String toString() { - StringBuilder bld = new StringBuilder(); - bld.append("(type=StopReplicaRequest"). - append(", controllerId=").append(controllerId). - append(", controllerEpoch=").append(controllerEpoch). - append(", brokerEpoch=").append(brokerEpoch). - append(", deletePartitions=").append(deletePartitions). - append(", topicStates=").append(topicStates.stream().map(StopReplicaTopicState::toString).collect(Collectors.joining(","))). 
- append(")"); - return bld.toString(); + return "(type=StopReplicaRequest" + + ", controllerId=" + controllerId + + ", controllerEpoch=" + controllerEpoch + + ", brokerEpoch=" + brokerEpoch + + ", deletePartitions=" + deletePartitions + + ", topicStates=" + topicStates.stream().map(StopReplicaTopicState::toString).collect(Collectors.joining(",")) + + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java index b846fb7b0f9ed..15a4dfff1a6a0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java @@ -133,17 +133,15 @@ private static Map groupByTopic(Map nestingContex if (refreshingHttpsJwks.maybeExpediteRefresh(keyId)) log.debug("Refreshing JWKs from {} as no suitable verification key for JWS w/ header {} was found in {}", refreshingHttpsJwks.getLocation(), jws.getHeaders().getFullHeaderAsJsonString(), jwks); - StringBuilder sb = new StringBuilder(); - sb.append("Unable to find a suitable verification key for JWS w/ header ").append(jws.getHeaders().getFullHeaderAsJsonString()); - sb.append(" from JWKs ").append(jwks).append(" obtained from ").append( - refreshingHttpsJwks.getLocation()); - throw new UnresolvableKeyException(sb.toString()); + String sb = "Unable to find a suitable verification key for JWS w/ header " + jws.getHeaders().getFullHeaderAsJsonString() + + " from JWKs " + jwks + " obtained from " + + refreshingHttpsJwks.getLocation(); + throw new UnresolvableKeyException(sb); } catch (JoseException | IOException e) { - StringBuilder sb = new StringBuilder(); - sb.append("Unable to find a suitable verification key for JWS w/ header ").append(jws.getHeaders().getFullHeaderAsJsonString()); - sb.append(" due to an unexpected exception (").append(e).append(") while obtaining or using keys from JWKS endpoint at ").append( - refreshingHttpsJwks.getLocation()); - throw new UnresolvableKeyException(sb.toString(), e); + String sb = "Unable to find a suitable verification key for JWS w/ header " + jws.getHeaders().getFullHeaderAsJsonString() + + " due to an unexpected exception (" + e + ") while obtaining or using keys from JWKS endpoint at " + + refreshingHttpsJwks.getLocation(); + throw new UnresolvableKeyException(sb, e); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java index f12a482f14901..4b9f7bb3d1601 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtils.java @@ -176,7 +176,7 @@ public static OAuthBearerValidationResult validateScope(OAuthBearerToken token, if (!tokenScope.contains(requiredScopeElement)) return OAuthBearerValidationResult.newFailure(String.format( "The provided scope (%s) was missing a required scope (%s). 
All required scope elements: %s", - String.valueOf(tokenScope), requiredScopeElement, requiredScope), + tokenScope, requiredScopeElement, requiredScope), requiredScope.toString(), null); } return OAuthBearerValidationResult.newSuccess(); diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java index 3ca8ca6fcc027..3bdf9b70f2a0a 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java @@ -109,10 +109,7 @@ public boolean shouldBeRebuilt(Map nextConfigs) { if (truststore != null && truststore.modified()) { return true; } - if (keystore != null && keystore.modified()) { - return true; - } - return false; + return keystore != null && keystore.modified(); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java index 0fb83281bf864..bd8c50a0a8b7f 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/SslPrincipalMapper.java @@ -178,7 +178,7 @@ private String escapeLiteralBackReferences(final String unescaped, final int num final StringBuilder sb = new StringBuilder(value.length() + 1); final int groupStart = backRefMatcher.start(1); - sb.append(value.substring(0, groupStart - 1)); + sb.append(value, 0, groupStart - 1); sb.append("\\"); sb.append(value.substring(groupStart - 1)); value = sb.toString(); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java b/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java index e78b83ee91c83..95a0b30cb90e6 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java @@ -111,8 +111,8 @@ final public void update(int b) { // java -cp build/test/classes/:build/classes/ \ // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78 - private static final int T8_0_START = 0 * 256; - private static final int T8_1_START = 1 * 256; + private static final int T8_0_START = 0; + private static final int T8_1_START = 256; private static final int T8_2_START = 2 * 256; private static final int T8_3_START = 3 * 256; private static final int T8_4_START = 4 * 256; diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index ce67fbdb0c73e..df7f5a50c1dce 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -371,6 +371,7 @@ public static boolean isEqualConstantTime(char[] first, char[] second) { int j = i < second.length ? 
i : 0; if (first[i] != second[j]) { matches = false; + break; } } return matches; diff --git a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java index 80e5ddfc4d830..536a27f576fd0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java @@ -57,7 +57,7 @@ public void testParseAndValidateAddressesWithReverseLookup() { // With lookup of example.com, either one or two addresses are expected depending on // whether ipv4 and ipv6 are enabled - List validatedAddresses = checkWithLookup(asList("example.com:10000")); + List validatedAddresses = checkWithLookup(Collections.singletonList("example.com:10000")); assertFalse(validatedAddresses.isEmpty(), "Unexpected addresses " + validatedAddresses); List validatedHostNames = validatedAddresses.stream().map(InetSocketAddress::getHostName) .collect(Collectors.toList()); diff --git a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java index 0b2733207ce42..cec1f10263077 100644 --- a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java @@ -1141,7 +1141,7 @@ public void testTopicMetadataOnUpdatePartitionLeadership() { Optional.of(2), Optional.of(3) )), - Arrays.asList(node1) + Collections.singletonList(node1) ); assertEquals(2, metadata.fetch().partitionsForTopic(topic).size()); assertEquals(1, metadata.fetch().partition(tp0).leader().id()); @@ -1161,20 +1161,20 @@ public void testUpdatePartitionLeadership() { // topic2 has 1 partition: tp21 String topic1 = "topic1"; TopicPartition tp11 = new TopicPartition(topic1, 0); - PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp11, Optional.of(1), Optional.of(100), Arrays.asList(1, 2), Arrays.asList(1, 2), Arrays.asList(3)); + PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp11, Optional.of(1), Optional.of(100), Arrays.asList(1, 2), Arrays.asList(1, 2), Collections.singletonList(3)); Uuid topic1Id = Uuid.randomUuid(); TopicPartition tp12 = new TopicPartition(topic1, 1); - PartitionMetadata part12Metadata = new PartitionMetadata(Errors.NONE, tp12, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Arrays.asList(1)); + PartitionMetadata part12Metadata = new PartitionMetadata(Errors.NONE, tp12, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Collections.singletonList(1)); String topic2 = "topic2"; TopicPartition tp21 = new TopicPartition(topic2, 0); - PartitionMetadata part2Metadata = new PartitionMetadata(Errors.NONE, tp21, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Arrays.asList(1)); + PartitionMetadata part2Metadata = new PartitionMetadata(Errors.NONE, tp21, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Collections.singletonList(1)); Uuid topic2Id = Uuid.randomUuid(); Set internalTopics = Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME); TopicPartition internalPart = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0); Uuid internalTopicId = Uuid.randomUuid(); - PartitionMetadata internalTopicMetadata = new PartitionMetadata(Errors.NONE, internalPart, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Arrays.asList(1)); + PartitionMetadata internalTopicMetadata = new 
PartitionMetadata(Errors.NONE, internalPart, Optional.of(2), Optional.of(200), Arrays.asList(2, 3), Arrays.asList(2, 3), Collections.singletonList(1)); Map topicIds = new HashMap<>(); topicIds.put(topic1, topic1Id); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java index 59d1150ac3ba8..7d70d58a71ac2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java @@ -21,6 +21,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.List; import static java.util.Arrays.asList; @@ -61,7 +62,7 @@ public void shouldGetAllEntries() { public void shouldImplementEqualsProperly() { assertEquals(config, config); assertEquals(config, new Config(config.entries())); - assertNotEquals(new Config(asList(E1)), config); + assertNotEquals(new Config(Collections.singletonList(E1)), config); assertNotEquals(config, "this"); } @@ -69,7 +70,7 @@ public void shouldImplementEqualsProperly() { public void shouldImplementHashCodeProperly() { assertEquals(config.hashCode(), config.hashCode()); assertEquals(config.hashCode(), new Config(config.entries()).hashCode()); - assertNotEquals(new Config(asList(E1)).hashCode(), config.hashCode()); + assertNotEquals(new Config(Collections.singletonList(E1)).hashCode(), config.hashCode()); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java index 9b5e98a005f06..13119e9f2cf5e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResultTest.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; +import java.util.Collections; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -64,7 +65,7 @@ public void testUserLevelErrors() throws Exception { int iterations = 4096; dataFuture.complete(new DescribeUserScramCredentialsResponseData().setErrorCode(Errors.NONE.code()).setResults(Arrays.asList( new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(goodUser).setCredentialInfos( - Arrays.asList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations))), + Collections.singletonList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations))), new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(unknownUser).setErrorCode(Errors.RESOURCE_NOT_FOUND.code()), new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(failedUser).setErrorCode(Errors.DUPLICATE_RESOURCE.code())))); DescribeUserScramCredentialsResult results = new DescribeUserScramCredentialsResult(dataFuture); @@ -76,7 +77,7 @@ public void testUserLevelErrors() throws Exception { } assertEquals(Arrays.asList(goodUser, failedUser), results.users().get(), "Expected 2 users with credentials"); UserScramCredentialsDescription goodUserDescription = results.description(goodUser).get(); - assertEquals(new UserScramCredentialsDescription(goodUser, Arrays.asList(new 
ScramCredentialInfo(scramSha256, iterations))), goodUserDescription); + assertEquals(new UserScramCredentialsDescription(goodUser, Collections.singletonList(new ScramCredentialInfo(scramSha256, iterations))), goodUserDescription); try { results.description(failedUser).get(); fail("expected description(failedUser) to fail when there is a user-level error"); @@ -98,15 +99,15 @@ public void testSuccessfulDescription() throws Exception { KafkaFutureImpl dataFuture = new KafkaFutureImpl<>(); ScramMechanism scramSha256 = ScramMechanism.SCRAM_SHA_256; int iterations = 4096; - dataFuture.complete(new DescribeUserScramCredentialsResponseData().setErrorCode(Errors.NONE.code()).setResults(Arrays.asList( + dataFuture.complete(new DescribeUserScramCredentialsResponseData().setErrorCode(Errors.NONE.code()).setResults(Collections.singletonList( new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(goodUser).setCredentialInfos( - Arrays.asList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations)))))); + Collections.singletonList(new DescribeUserScramCredentialsResponseData.CredentialInfo().setMechanism(scramSha256.type()).setIterations(iterations)))))); DescribeUserScramCredentialsResult results = new DescribeUserScramCredentialsResult(dataFuture); - assertEquals(Arrays.asList(goodUser), results.users().get(), "Expected 1 user with credentials"); + assertEquals(Collections.singletonList(goodUser), results.users().get(), "Expected 1 user with credentials"); Map allResults = results.all().get(); assertEquals(1, allResults.size()); UserScramCredentialsDescription goodUserDescriptionViaAll = allResults.get(goodUser); - assertEquals(new UserScramCredentialsDescription(goodUser, Arrays.asList(new ScramCredentialInfo(scramSha256, iterations))), goodUserDescriptionViaAll); + assertEquals(new UserScramCredentialsDescription(goodUser, Collections.singletonList(new ScramCredentialInfo(scramSha256, iterations))), goodUserDescriptionViaAll); assertEquals(goodUserDescriptionViaAll, results.description(goodUser).get(), "Expected same thing via all() and description()"); try { results.description(unknownUser).get(); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java index ea1305e533bef..eb7aea8ae871d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java @@ -1426,7 +1426,7 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiBasic() { ); DescribeTopicPartitionsResponseData dataFirstPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), Arrays.asList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), singletonList(0)); dataFirstPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName0) .setPartitionIndex(1)); @@ -1435,13 +1435,12 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiBasic() { if (request.topics().size() != 2) return false; if (!request.topics().get(0).name().equals(topicName0)) return false; if (!request.topics().get(1).name().equals(topicName1)) return false; - if (request.cursor() != null) return false; - return true; + return request.cursor() == null; }, new 
DescribeTopicPartitionsResponse(dataFirstPart)); DescribeTopicPartitionsResponseData dataSecondPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName0, topics.get(topicName0), Arrays.asList(1)); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), Arrays.asList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName0, topics.get(topicName0), singletonList(1)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), singletonList(0)); env.kafkaClient().prepareResponse(body -> { DescribeTopicPartitionsRequestData request = (DescribeTopicPartitionsRequestData) body.data(); if (request.topics().size() != 2) return false; @@ -1449,9 +1448,7 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiBasic() { if (!request.topics().get(1).name().equals(topicName1)) return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); - if (cursor == null || cursor.topicName() != topicName0 || cursor.partitionIndex() != 1) return false; - - return true; + return cursor != null && cursor.topicName() == topicName0 && cursor.partitionIndex() == 1; }, new DescribeTopicPartitionsResponse(dataSecondPart)); try { DescribeTopicsResult result = env.adminClient().describeTopics( @@ -1493,8 +1490,8 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiEdgeCase() { ); DescribeTopicPartitionsResponseData dataFirstPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), Arrays.asList(0)); - addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName1, topics.get(topicName1), Arrays.asList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName0, topics.get(topicName0), singletonList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataFirstPart, topicName1, topics.get(topicName1), singletonList(0)); dataFirstPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName1) .setPartitionIndex(1)); @@ -1504,13 +1501,12 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiEdgeCase() { if (!request.topics().get(0).name().equals(topicName0)) return false; if (!request.topics().get(1).name().equals(topicName1)) return false; if (!request.topics().get(2).name().equals(topicName2)) return false; - if (request.cursor() != null) return false; - return true; + return request.cursor() == null; }, new DescribeTopicPartitionsResponse(dataFirstPart)); DescribeTopicPartitionsResponseData dataSecondPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), Arrays.asList(1)); - addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName2, topics.get(topicName2), Arrays.asList(0)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName1, topics.get(topicName1), singletonList(1)); + addPartitionToDescribeTopicPartitionsResponse(dataSecondPart, topicName2, topics.get(topicName2), singletonList(0)); dataSecondPart.setNextCursor(new DescribeTopicPartitionsResponseData.Cursor() .setTopicName(topicName2) .setPartitionIndex(1)); @@ -1520,19 +1516,17 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiEdgeCase() { if (!request.topics().get(0).name().equals(topicName1)) return false; if (!request.topics().get(1).name().equals(topicName2)) 
return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); - if (cursor == null || !cursor.topicName().equals(topicName1) || cursor.partitionIndex() != 1) return false; - return true; + return cursor != null && cursor.topicName().equals(topicName1) && cursor.partitionIndex() == 1; }, new DescribeTopicPartitionsResponse(dataSecondPart)); DescribeTopicPartitionsResponseData dataThirdPart = new DescribeTopicPartitionsResponseData(); - addPartitionToDescribeTopicPartitionsResponse(dataThirdPart, topicName2, topics.get(topicName2), Arrays.asList(1)); + addPartitionToDescribeTopicPartitionsResponse(dataThirdPart, topicName2, topics.get(topicName2), singletonList(1)); env.kafkaClient().prepareResponse(body -> { DescribeTopicPartitionsRequestData request = (DescribeTopicPartitionsRequestData) body.data(); if (request.topics().size() != 1) return false; if (!request.topics().get(0).name().equals(topicName2)) return false; DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); - if (cursor == null || !cursor.topicName().equals(topicName2) || cursor.partitionIndex() != 1) return false; - return true; + return cursor != null && cursor.topicName().equals(topicName2) && cursor.partitionIndex() == 1; }, new DescribeTopicPartitionsResponse(dataThirdPart)); try { DescribeTopicsResult result = env.adminClient().describeTopics( @@ -1561,12 +1555,12 @@ private void addPartitionToDescribeTopicPartitionsResponse( List addingPartitions = new ArrayList<>(); partitions.forEach(partition -> { addingPartitions.add(new DescribeTopicPartitionsResponsePartition() - .setIsrNodes(Arrays.asList(0)) + .setIsrNodes(singletonList(0)) .setErrorCode((short) 0) .setLeaderEpoch(0) .setLeaderId(0) - .setEligibleLeaderReplicas(Arrays.asList(1)) - .setLastKnownElr(Arrays.asList(2)) + .setEligibleLeaderReplicas(singletonList(1)) + .setLastKnownElr(singletonList(2)) .setPartitionIndex(partition) .setReplicaNodes(Arrays.asList(0, 1, 2))); }); @@ -1603,15 +1597,15 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiErrorHandling() { .setTopicId(topics.get(topicName0)) .setName(topicName0) .setIsInternal(false) - .setPartitions(Arrays.asList(new DescribeTopicPartitionsResponsePartition() - .setIsrNodes(Arrays.asList(0)) - .setErrorCode((short) 0) - .setLeaderEpoch(0) - .setLeaderId(0) - .setEligibleLeaderReplicas(Arrays.asList(1)) - .setLastKnownElr(Arrays.asList(2)) - .setPartitionIndex(0) - .setReplicaNodes(Arrays.asList(0, 1, 2)))) + .setPartitions(singletonList(new DescribeTopicPartitionsResponsePartition() + .setIsrNodes(singletonList(0)) + .setErrorCode((short) 0) + .setLeaderEpoch(0) + .setLeaderId(0) + .setEligibleLeaderReplicas(singletonList(1)) + .setLastKnownElr(singletonList(2)) + .setPartitionIndex(0) + .setReplicaNodes(asList(0, 1, 2)))) ); dataFirstPart.topics().add(new DescribeTopicPartitionsResponseTopic() .setErrorCode((short) 29) @@ -1624,8 +1618,7 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiErrorHandling() { if (request.topics().size() != 2) return false; if (!request.topics().get(0).name().equals(topicName0)) return false; if (!request.topics().get(1).name().equals(topicName1)) return false; - if (request.cursor() != null) return false; - return true; + return request.cursor() == null; }, new DescribeTopicPartitionsResponse(dataFirstPart)); DescribeTopicsResult result = env.adminClient().describeTopics( Arrays.asList(topicName1, topicName0), new DescribeTopicsOptions() @@ -1660,7 +1653,7 @@ private void 
callAdminClientApisAndExpectAnAuthenticationError(AdminClientUnitTe Map counts = new HashMap<>(); counts.put("my_topic", NewPartitions.increaseTo(3)); - counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3)))); + counts.put("other_topic", NewPartitions.increaseTo(3, asList(singletonList(2), singletonList(3)))); e = assertThrows(ExecutionException.class, () -> env.adminClient().createPartitions(counts).all().get()); assertInstanceOf(AuthenticationException.class, e.getCause(), "Expected an authentication error, but got " + Utils.stackTrace(e)); @@ -1690,9 +1683,9 @@ private void callClientQuotasApisAndExpectAnAuthenticationError(AdminClientUnitT "Expected an authentication error, but got " + Utils.stackTrace(e)); ClientQuotaEntity entity = new ClientQuotaEntity(Collections.singletonMap(ClientQuotaEntity.USER, "user")); - ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity, asList(new ClientQuotaAlteration.Op("consumer_byte_rate", 1000.0))); + ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity, singletonList(new ClientQuotaAlteration.Op("consumer_byte_rate", 1000.0))); e = assertThrows(ExecutionException.class, - () -> env.adminClient().alterClientQuotas(asList(alteration)).all().get()); + () -> env.adminClient().alterClientQuotas(singletonList(alteration)).all().get()); assertInstanceOf(AuthenticationException.class, e.getCause(), "Expected an authentication error, but got " + Utils.stackTrace(e)); @@ -1818,9 +1811,9 @@ public void testDeleteAcls() throws Exception { .setThrottleTimeMs(0) .setFilterResults(asList( new DeleteAclsResponseData.DeleteAclsFilterResult() - .setMatchingAcls(asList(DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE))), + .setMatchingAcls(singletonList(DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE))), new DeleteAclsResponseData.DeleteAclsFilterResult() - .setMatchingAcls(asList(DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))))), + .setMatchingAcls(singletonList(DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))))), ApiKeys.DELETE_ACLS.latestVersion())); results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2)); Collection deleted = results.all().get(); @@ -1894,11 +1887,11 @@ public void testDescribeBrokerConfigs() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse( - new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() + new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(broker0Resource.name()).setResourceType(broker0Resource.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList())))), env.cluster().nodeById(0)); env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse( - new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() + new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(broker1Resource.name()).setResourceType(broker1Resource.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList())))), env.cluster().nodeById(1)); Map> result = env.adminClient().describeConfigs(asList( @@ -1939,9 +1932,9 @@ public void testDescribeConfigsPartialResponse() { try (AdminClientUnitTestEnv env = mockClientEnv()) { 
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(new DescribeConfigsResponse( - new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() - .setResourceName(topic.name()).setResourceType(topic.type().id()).setErrorCode(Errors.NONE.code()) - .setConfigs(emptyList()))))); + new DescribeConfigsResponseData().setResults(singletonList(new DescribeConfigsResponseData.DescribeConfigsResult() + .setResourceName(topic.name()).setResourceType(topic.type().id()).setErrorCode(Errors.NONE.code()) + .setConfigs(emptyList()))))); Map> result = env.adminClient().describeConfigs(asList( topic, topic2)).values(); @@ -1964,9 +1957,9 @@ public void testDescribeConfigsUnrequested() throws Exception { new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(unrequested.name()).setResourceType(unrequested.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList()))))); - Map> result = env.adminClient().describeConfigs(asList( + Map> result = env.adminClient().describeConfigs(singletonList( topic)).values(); - assertEquals(new HashSet<>(asList(topic)), result.keySet()); + assertEquals(new HashSet<>(singletonList(topic)), result.keySet()); assertNotNull(result.get(topic).get()); assertNull(result.get(unrequested)); } @@ -2331,10 +2324,10 @@ public void testDescribeReplicaLogDirsUnexpected() throws ExecutionException, In prepareDescribeLogDirsResult(unexpected, broker1log1, broker1Log1PartitionSize, broker1Log1OffsetLag, true)))), env.cluster().nodeById(expected.brokerId())); - DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(asList(expected)); + DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(singletonList(expected)); Map> values = result.values(); - assertEquals(TestUtils.toSet(asList(expected)), values.keySet()); + assertEquals(TestUtils.toSet(singletonList(expected)), values.keySet()); assertNotNull(values.get(expected)); assertEquals(broker1log0, values.get(expected).get().getCurrentReplicaLogDir()); @@ -2360,7 +2353,7 @@ public void testCreatePartitions() throws Exception { Map counts = new HashMap<>(); counts.put("my_topic", NewPartitions.increaseTo(3)); - counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3)))); + counts.put("other_topic", NewPartitions.increaseTo(3, asList(singletonList(2), singletonList(3)))); CreatePartitionsResult results = env.adminClient().createPartitions(counts); Map> values = results.values(); @@ -3006,12 +2999,12 @@ public void testListConsumerGroupsWithTypes() throws Exception { expectListGroupsRequestWithFilters(singleton(ConsumerGroupState.STABLE.toString()), Collections.emptySet()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(Arrays.asList( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable") - .setGroupType(GroupType.CLASSIC.toString())))), + .setGroups(singletonList( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable") + .setGroupType(GroupType.CLASSIC.toString())))), env.cluster().nodeById(0)); final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(singleton(ConsumerGroupState.STABLE)); @@ -3567,13 +3560,13 @@ public void testDescribeNonConsumerGroups() throws Exception { DescribeGroupsResponseData 
data = new DescribeGroupsResponseData(); data.groups().add(DescribeGroupsResponse.groupMetadata( - GROUP_ID, - Errors.NONE, - "", - "non-consumer", - "", - asList(), - Collections.emptySet())); + GROUP_ID, + Errors.NONE, + "", + "non-consumer", + "", + emptyList(), + Collections.emptySet())); env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data)); @@ -5379,12 +5372,12 @@ public void testListOffsets() throws Exception { pInfos.add(new PartitionInfo("qux", 0, node0, new Node[]{node0}, new Node[]{node0})); final Cluster cluster = new Cluster( - "mockClusterId", - Arrays.asList(node0), - pInfos, - Collections.emptySet(), - Collections.emptySet(), - node0); + "mockClusterId", + singletonList(node0), + pInfos, + Collections.emptySet(), + Collections.emptySet(), + node0); final TopicPartition tp0 = new TopicPartition("foo", 0); final TopicPartition tp1 = new TopicPartition("bar", 0); @@ -5474,7 +5467,7 @@ public void testListOffsetsRetriableErrors() throws Exception { ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, -1L, 456L, 654); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t2)); + .setTopics(singletonList(t2)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); // metadata refresh because of LEADER_NOT_AVAILABLE @@ -5483,7 +5476,7 @@ public void testListOffsetsRetriableErrors() throws Exception { t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); Map partitions = new HashMap<>(); @@ -5533,7 +5526,7 @@ public void testListOffsetsNonRetriableErrors() throws Exception { ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.TOPIC_AUTHORIZATION_FAILED, -1L, -1L, -1); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData)); Map partitions = new HashMap<>(); @@ -5603,7 +5596,7 @@ public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Ex ListOffsetsTopicResponse topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(topicResponse)); + .setTopics(singletonList(topicResponse)); env.kafkaClient().prepareResponseFrom( // ensure that no max timestamp requests are retried request -> request instanceof ListOffsetsRequest && ((ListOffsetsRequest) request).topics().stream() @@ -5771,7 +5764,7 @@ public void testListOffsetsNonMaxTimestampDowngradedImmediately() throws Excepti ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); // listoffsets response from broker 0 env.kafkaClient().prepareResponse( @@ -6096,13 +6089,13 @@ public void testListOffsetsMetadataRetriableErrors() throws Exception { ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, 
Errors.NONE, -1L, 345L, 543); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); // listoffsets response from broker 1 ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 789L, 987); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t1)); + .setTopics(singletonList(t1)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); Map partitions = new HashMap<>(); @@ -6165,13 +6158,13 @@ public void testListOffsetsWithMultiplePartitionsLeaderChange() throws Exception t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -2L, 123L, 456); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t1)); + .setTopics(singletonList(t1)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node2); Map partitions = new HashMap<>(); @@ -6211,7 +6204,7 @@ public void testListOffsetsWithLeaderChange() throws Exception { ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); // updating leader from node0 to node1 and metadata refresh because of NOT_LEADER_OR_FOLLOWER @@ -6225,7 +6218,7 @@ public void testListOffsetsWithLeaderChange() throws Exception { t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); Map partitions = new HashMap<>(); @@ -6335,7 +6328,7 @@ public void testListOffsetsPartialResponse() throws Exception { ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456); ListOffsetsResponseData data = new ListOffsetsResponseData() .setThrottleTimeMs(0) - .setTopics(Arrays.asList(t0)); + .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(data), node0); Map partitions = new HashMap<>(); @@ -6370,7 +6363,7 @@ public void testSuccessfulRetryAfterRequestTimeout() throws Exception { Node node0 = new Node(0, "localhost", 8121); nodes.put(0, node0); Cluster cluster = new Cluster("mockClusterId", nodes.values(), - Arrays.asList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), + singletonList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), nodes.get(0)); @@ -6425,7 +6418,7 @@ private void testApiTimeout(int requestTimeoutMs, Node node0 = new Node(0, "localhost", 8121); nodes.put(0, node0); Cluster cluster = new 
Cluster("mockClusterId", nodes.values(), - Arrays.asList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), + singletonList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), nodes.get(0)); @@ -6474,7 +6467,7 @@ public void testRequestTimeoutExceedingDefaultApiTimeout() throws Exception { Node node0 = new Node(0, "localhost", 8121); nodes.put(0, node0); Cluster cluster = new Cluster("mockClusterId", nodes.values(), - Arrays.asList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), + singletonList(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0})), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), nodes.get(0)); @@ -6532,7 +6525,7 @@ public void testDescribeClientQuotas() throws Exception { env.kafkaClient().prepareResponse(DescribeClientQuotasResponse.fromQuotaEntities(responseData, 0)); - ClientQuotaFilter filter = ClientQuotaFilter.contains(asList(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, value))); + ClientQuotaFilter filter = ClientQuotaFilter.contains(singletonList(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, value))); DescribeClientQuotasResult result = env.adminClient().describeClientQuotas(filter); Map> resultData = result.entities().get(); @@ -6790,7 +6783,7 @@ public void testAlterUserScramCredentialsUnknownMechanism() { ScramMechanism user2ScramMechanism0 = ScramMechanism.SCRAM_SHA_256; AlterUserScramCredentialsResponseData responseData = new AlterUserScramCredentialsResponseData(); - responseData.setResults(Arrays.asList( + responseData.setResults(singletonList( new AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult().setUser(user2Name))); env.kafkaClient().prepareResponse(new AlterUserScramCredentialsResponse(responseData)); @@ -7167,7 +7160,7 @@ public void testRetryDescribeTransactionsAfterNotCoordinatorError() throws Excep env.kafkaClient().prepareResponse( request -> request instanceof FindCoordinatorRequest, new FindCoordinatorResponse(new FindCoordinatorResponseData() - .setCoordinators(Arrays.asList(new FindCoordinatorResponseData.Coordinator() + .setCoordinators(singletonList(new FindCoordinatorResponseData.Coordinator() .setKey(transactionalId) .setErrorCode(Errors.NONE.code()) .setNodeId(coordinator1.id()) @@ -7197,7 +7190,7 @@ public void testRetryDescribeTransactionsAfterNotCoordinatorError() throws Excep env.kafkaClient().prepareResponse( request -> request instanceof FindCoordinatorRequest, new FindCoordinatorResponse(new FindCoordinatorResponseData() - .setCoordinators(Arrays.asList(new FindCoordinatorResponseData.Coordinator() + .setCoordinators(singletonList(new FindCoordinatorResponseData.Coordinator() .setKey(transactionalId) .setErrorCode(Errors.NONE.code()) .setNodeId(coordinator2.id()) diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java index 6cbc86cb0dd96..f72362715e26e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java @@ -105,7 +105,7 @@ public class MockAdminClient extends AdminClient { private KafkaException listConsumerGroupOffsetsException; - private Map mockMetrics = new HashMap<>(); + private final Map mockMetrics = new HashMap<>(); private final List allTokens = new ArrayList<>(); diff 
--git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java index fc52e9e6717ed..823d4b39b1e9a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java @@ -90,7 +90,7 @@ public void testBuildOldLookupRequestRequiresAtLeastOneKey() { strategy.disableBatch(); assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest( - new HashSet<>(Arrays.asList(CoordinatorKey.byTransactionalId("txnid"))))); + new HashSet<>(Collections.singletonList(CoordinatorKey.byTransactionalId("txnid"))))); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index b1db0297a120b..043cc211da5b4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -52,7 +52,6 @@ import org.mockito.Mockito; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -1336,13 +1335,13 @@ public ClientResponse mockOffsetCommitResponse(String topic, long receivedTimeMs, Errors error) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Arrays.asList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setErrorCode(error.code()) - .setPartitionIndex(partition))))); + .setTopics(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setErrorCode(error.code()) + .setPartitionIndex(partition))))); OffsetCommitResponse response = mock(OffsetCommitResponse.class); when(response.data()).thenReturn(responseData); return new ClientResponse( @@ -1362,13 +1361,13 @@ public ClientResponse mockOffsetCommitResponseDisconnected(String topic, int par short apiKeyVersion, NetworkClientDelegate.UnsentRequest unsentRequest) { OffsetCommitResponseData responseData = new OffsetCommitResponseData() - .setTopics(Arrays.asList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setErrorCode(Errors.NONE.code()) - .setPartitionIndex(partition))))); + .setTopics(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setErrorCode(Errors.NONE.code()) + .setPartitionIndex(partition))))); OffsetCommitResponse response = mock(OffsetCommitResponse.class); when(response.data()).thenReturn(responseData); return new ClientResponse( diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java index 954ed1c11e09b..541c6a2a04adc 100644 --- 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java @@ -162,13 +162,13 @@ public abstract class ConsumerCoordinatorTest { private final String consumerId2 = "consumer2"; private MockClient client; - private MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { + private final MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { { put(topic1, 1); put(topic2, 1); } }); - private Node node = metadataResponse.brokers().iterator().next(); + private final Node node = metadataResponse.brokers().iterator().next(); private SubscriptionState subscriptions; private ConsumerMetadata metadata; private Metrics metrics; @@ -311,7 +311,7 @@ public void testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfN List> capturedTopics = topicsCaptor.getAllValues(); // expected the final group subscribed topics to be updated to "topic1" - Set expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1)); + Set expectedTopicsGotCalled = new HashSet<>(singletonList(topic1)); assertEquals(expectedTopicsGotCalled, capturedTopics.get(0)); } @@ -389,8 +389,8 @@ public void testPerformAssignmentShouldValidateCooperativeAssignment() { // simulate the custom cooperative assignor didn't revoke the partition first before assign to other consumer Map> assignment = new HashMap<>(); - assignment.put(consumerId, Arrays.asList(t1p)); - assignment.put(consumerId2, Arrays.asList(t2p)); + assignment.put(consumerId, singletonList(t1p)); + assignment.put(consumerId2, singletonList(t2p)); partitionAssignor.prepare(assignment); try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) { @@ -450,8 +450,8 @@ public String name() { // simulate the cooperative sticky assignor do the assignment with out-of-date ownedPartition Map> assignment = new HashMap<>(); - assignment.put(consumerId, Arrays.asList(t1p)); - assignment.put(consumerId2, Arrays.asList(t2p)); + assignment.put(consumerId, singletonList(t1p)); + assignment.put(consumerId2, singletonList(t2p)); mockCooperativeStickyAssignor.prepare(assignment); try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignorsWithCooperativeStickyAssignor, false, mockSubscriptionState)) { @@ -979,7 +979,7 @@ public void testNormalJoinGroupLeader() { final String consumerId = "leader"; final Set subscription = singleton(topic1); final List owned = Collections.emptyList(); - final List assigned = Arrays.asList(t1p); + final List assigned = singletonList(t1p); subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener)); @@ -1016,9 +1016,9 @@ public void testOutdatedCoordinatorAssignment() { final String consumerId = "outdated_assignment"; final List owned = Collections.emptyList(); final List oldSubscription = singletonList(topic2); - final List oldAssignment = Arrays.asList(t2p); + final List oldAssignment = singletonList(t2p); final List newSubscription = singletonList(topic1); - final List newAssignment = Arrays.asList(t1p); + final List newAssignment = singletonList(t1p); subscriptions.subscribe(toSet(oldSubscription), Optional.of(rebalanceListener)); @@ -2051,7 +2051,7 @@ public void testUpdateMetadataDuringRebalance() { // prepare initial rebalance Map> memberSubscriptions = singletonMap(consumerId, topics); - 
partitionAssignor.prepare(singletonMap(consumerId, Arrays.asList(tp1))); + partitionAssignor.prepare(singletonMap(consumerId, singletonList(tp1))); client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE)); client.prepareResponse(body -> { @@ -2254,7 +2254,7 @@ private void testInternalTopicInclusion(boolean includeInternalTopics) { public void testRejoinGroup() { String otherTopic = "otherTopic"; final List owned = Collections.emptyList(); - final List assigned = Arrays.asList(t1p); + final List assigned = singletonList(t1p); subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener)); @@ -2286,7 +2286,7 @@ public void testRejoinGroup() { public void testDisconnectInJoin() { subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener)); final List owned = Collections.emptyList(); - final List assigned = Arrays.asList(t1p); + final List assigned = singletonList(t1p); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java index 4331e72054177..27404877ec014 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java @@ -54,7 +54,7 @@ public class ConsumerInterceptorsTest { * Test consumer interceptor that filters records in onConsume() intercept */ private class FilterConsumerInterceptor implements ConsumerInterceptor { - private int filterPartition; + private final int filterPartition; private boolean throwExceptionOnConsume = false; private boolean throwExceptionOnCommit = false; diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index 7d4aa5e3a85d6..643dffb617918 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -2070,7 +2070,7 @@ public void testCallbackAndInterceptorHandleError() { String invalidTopicName = "topic abc"; // Invalid topic name due to space ProducerInterceptors producerInterceptors = - new ProducerInterceptors<>(Arrays.asList(new MockProducerInterceptor())); + new ProducerInterceptors<>(Collections.singletonList(new MockProducerInterceptor())); try (Producer producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), producerMetadata, client, producerInterceptors, time)) { diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java index 32d3e532b6ee7..276c8afb75cc5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java @@ -1485,7 +1485,7 @@ public void testReadyAndDrainWhenABatchIsBeingRetried() throws InterruptedExcept int part1LeaderEpoch = 100; // Create cluster metadata, partition1 being hosted by node1 PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), 
Optional.of(part1LeaderEpoch), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); int batchSize = 10; int lingerMs = 10; @@ -1527,7 +1527,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, // Try to drain from node1, it should return no batches. Map> batches = accum.drain(metadataCache, - new HashSet<>(Arrays.asList(node1)), 999999 /* maxSize */, now); + new HashSet<>(Collections.singletonList(node1)), 999999 /* maxSize */, now); assertTrue(batches.containsKey(node1.id()) && batches.get(node1.id()).isEmpty(), "No batches ready to be drained on Node1"); } @@ -1538,7 +1538,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, part1LeaderEpoch++; // Create cluster metadata, with new leader epoch. part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.of(part1LeaderEpoch), null, null, null); - metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, now); assertTrue(result.readyNodes.contains(node1), "Node1 is ready"); @@ -1558,7 +1558,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, now += 2 * retryBackoffMaxMs; // Create cluster metadata, with new leader epoch. part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.of(part1LeaderEpoch), null, null, null); - metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, now); assertTrue(result.readyNodes.contains(node1), "Node1 is ready"); @@ -1579,7 +1579,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, part1LeaderEpoch++; // Create cluster metadata, with new leader epoch. 
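// Illustrative aside, not part of the patch: the hunks around here all swap
// Arrays.asList(x) for Collections.singletonList(x) when the list has exactly one
// element. Both return fixed-size lists, but singletonList is also immutable and
// allocates one small object instead of a varargs array plus wrapper. A minimal
// sketch of the semantic difference (names are placeholders):
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class SingletonListSketch {
    public static void main(String[] args) {
        List<String> fromArrays = Arrays.asList("part1Metadata");             // fixed-size, elements mutable
        List<String> singleton = Collections.singletonList("part1Metadata");  // fixed-size and immutable

        fromArrays.set(0, "replaced"); // allowed: Arrays.asList is backed by a mutable array
        try {
            singleton.set(0, "replaced"); // throws: singletonList rejects all mutation
        } catch (UnsupportedOperationException expected) {
            System.out.println("singletonList is immutable");
        }
    }
}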
part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.of(part1LeaderEpoch), null, null, null); - metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, now); assertTrue(result.readyNodes.contains(node1), "Node1 is ready"); @@ -1605,11 +1605,11 @@ public void testDrainWithANodeThatDoesntHostAnyPartitions() { // Create cluster metadata, node2 doesn't host any partitions. PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.empty(), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, nodes, Collections.singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); // Drain for node2, it should return 0 batches, Map> batches = accum.drain(metadataCache, - new HashSet<>(Arrays.asList(node2)), 999999 /* maxSize */, time.milliseconds()); + new HashSet<>(Collections.singletonList(node2)), 999999 /* maxSize */, time.milliseconds()); assertTrue(batches.get(node2.id()).isEmpty()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java index 3b539b8803f86..5ee5935838fb5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java @@ -2512,7 +2512,7 @@ public void testAllowDrainInAbortableErrorState() throws InterruptedException { // Try to drain a message destined for tp1, it should get drained. 
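// Illustrative aside, not part of the patch: the drain calls above wrap a one-element
// list in a HashSet, i.e. new HashSet<>(Collections.singletonList(node2)). Where the
// caller never mutates the set, Collections.singleton(x) would express the same thing
// directly; the patch deliberately keeps the HashSet copy, which stays mutable. Sketch:
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

class SingletonSetSketch {
    public static void main(String[] args) {
        Set<Integer> mutable = new HashSet<>(Collections.singletonList(2)); // mutable copy
        Set<Integer> immutable = Collections.singleton(2);                  // one-element, immutable
        System.out.println(mutable.equals(immutable)); // true: same contents
        mutable.add(3);                                // fine on the HashSet copy
        // immutable.add(3);                           // would throw UnsupportedOperationException
    }
}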
Node node1 = new Node(1, "localhost", 1112); PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp1, Optional.of(node1.id()), Optional.empty(), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); appendToAccumulator(tp1); Map> drainedBatches = accumulator.drain(metadataCache, Collections.singleton(node1), Integer.MAX_VALUE, @@ -2533,7 +2533,7 @@ public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedExc appendToAccumulator(tp0); Node node1 = new Node(0, "localhost", 1111); PartitionMetadata part1Metadata = new PartitionMetadata(Errors.NONE, tp0, Optional.of(node1.id()), Optional.empty(), null, null, null); - MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), Arrays.asList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); + MetadataSnapshot metadataCache = new MetadataSnapshot(null, Collections.singletonMap(node1.id(), node1), singletonList(part1Metadata), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); Set nodes = new HashSet<>(); nodes.add(node1); diff --git a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java index b85dd3556e007..92ca3e8cf2de5 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java @@ -121,7 +121,7 @@ public void testDefinedTwice() { @Test public void testBadInputs() { testBadInputs(Type.INT, "hello", "42.5", 42.5, Long.MAX_VALUE, Long.toString(Long.MAX_VALUE), new Object()); - testBadInputs(Type.LONG, "hello", "42.5", Long.toString(Long.MAX_VALUE) + "00", new Object()); + testBadInputs(Type.LONG, "hello", "42.5", Long.MAX_VALUE + "00", new Object()); testBadInputs(Type.DOUBLE, "hello", new Object()); testBadInputs(Type.STRING, new Object()); testBadInputs(Type.LIST, 53, new Object()); @@ -242,7 +242,7 @@ public void testParseForValidate() { String errorMessageC = "Missing required configuration \"c\" which has no default value."; ConfigValue configA = new ConfigValue("a", 1, Collections.emptyList(), Collections.emptyList()); ConfigValue configB = new ConfigValue("b", null, Collections.emptyList(), Arrays.asList(errorMessageB, errorMessageB)); - ConfigValue configC = new ConfigValue("c", null, Collections.emptyList(), Arrays.asList(errorMessageC)); + ConfigValue configC = new ConfigValue("c", null, Collections.emptyList(), singletonList(errorMessageC)); ConfigValue configD = new ConfigValue("d", 10, Collections.emptyList(), Collections.emptyList()); expected.put("a", configA); expected.put("b", configB); @@ -253,7 +253,7 @@ public void testParseForValidate() { .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false)) .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true)) .define("c", Type.INT, 
Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true)) - .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", Arrays.asList("b"), new IntegerRecommender(false)); + .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", singletonList("b"), new IntegerRecommender(false)); Map props = new HashMap<>(); props.put("a", "1"); @@ -279,7 +279,7 @@ public void testValidate() { ConfigValue configA = new ConfigValue("a", 1, Arrays.asList(1, 2, 3), Collections.emptyList()); ConfigValue configB = new ConfigValue("b", null, Arrays.asList(4, 5), Arrays.asList(errorMessageB, errorMessageB)); - ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), Arrays.asList(errorMessageC)); + ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), singletonList(errorMessageC)); ConfigValue configD = new ConfigValue("d", 10, Arrays.asList(1, 2, 3), Collections.emptyList()); expected.put("a", configA); @@ -291,7 +291,7 @@ public void testValidate() { .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false)) .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true)) .define("c", Type.INT, Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true)) - .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", Arrays.asList("b"), new IntegerRecommender(false)); + .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", singletonList("b"), new IntegerRecommender(false)); Map props = new HashMap<>(); props.put("a", "1"); @@ -313,9 +313,9 @@ public void testValidateMissingConfigKey() { String errorMessageD = "d is referred in the dependents, but not defined."; ConfigValue configA = new ConfigValue("a", 1, Arrays.asList(1, 2, 3), Collections.emptyList()); - ConfigValue configB = new ConfigValue("b", null, Arrays.asList(4, 5), Arrays.asList(errorMessageB)); - ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), Arrays.asList(errorMessageC)); - ConfigValue configD = new ConfigValue("d", null, Collections.emptyList(), Arrays.asList(errorMessageD)); + ConfigValue configB = new ConfigValue("b", null, Arrays.asList(4, 5), singletonList(errorMessageB)); + ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), singletonList(errorMessageC)); + ConfigValue configD = new ConfigValue("d", null, Collections.emptyList(), singletonList(errorMessageD)); configD.visible(false); expected.put("a", configA); @@ -343,7 +343,7 @@ public void testValidateMissingConfigKey() { public void testValidateCannotParse() { Map expected = new HashMap<>(); String errorMessageB = "Invalid value non_integer for configuration a: Not a number of type INT"; - ConfigValue configA = new ConfigValue("a", null, Collections.emptyList(), Arrays.asList(errorMessageB)); + ConfigValue configA = new ConfigValue("a", null, Collections.emptyList(), singletonList(errorMessageB)); expected.put("a", configA); ConfigDef def = new ConfigDef().define("a", Type.INT, Importance.HIGH, "docs"); @@ -438,7 +438,7 @@ public void testBaseConfigDefDependents() { // Creating a ConfigDef based on another should compute the correct number of configs with no parent, even // if the base ConfigDef has already computed its parentless configs final ConfigDef baseConfigDef = new ConfigDef().define("a", Type.STRING, Importance.LOW, "docs"); - assertEquals(new 
HashSet<>(Arrays.asList("a")), baseConfigDef.getConfigsWithNoParent()); + assertEquals(new HashSet<>(singletonList("a")), baseConfigDef.getConfigsWithNoParent()); final ConfigDef configDef = new ConfigDef(baseConfigDef) .define("parent", Type.STRING, Importance.HIGH, "parent docs", "group", 1, Width.LONG, "Parent", singletonList("child")) @@ -502,8 +502,7 @@ public void toRst() { .define("opt3", Type.LIST, Arrays.asList("a", "b"), Importance.LOW, "docs3") .define("opt4", Type.BOOLEAN, false, Importance.LOW, null); - final String expectedRst = "" + - "``opt2``\n" + + final String expectedRst = "``opt2``\n" + " docs2\n" + "\n" + " * Type: int\n" + @@ -547,8 +546,7 @@ public void toEnrichedRst() { "Group Two", 0, Width.NONE, "..", singletonList("some.option")) .define("poor.opt", Type.STRING, "foo", Importance.HIGH, "Doc doc doc doc."); - final String expectedRst = "" + - "``poor.opt``\n" + + final String expectedRst = "``poor.opt``\n" + " Doc doc doc doc.\n" + "\n" + " * Type: string\n" + diff --git a/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java b/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java index 59949e6043c3e..b351b0e6e6cae 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/provider/DirectoryConfigProviderTest.java @@ -179,12 +179,12 @@ public void testMultipleAllowedPaths() { provider.configure(configs); ConfigData configData = provider.get(subdir); - assertEquals(toSet(asList(subdirFileName)), configData.data().keySet()); + assertEquals(toSet(Collections.singletonList(subdirFileName)), configData.data().keySet()); assertEquals("SUBDIRFILE", configData.data().get(subdirFileName)); assertNull(configData.ttl()); configData = provider.get(siblingDir); - assertEquals(toSet(asList(siblingDirFileName)), configData.data().keySet()); + assertEquals(toSet(Collections.singletonList(siblingDirFileName)), configData.data().keySet()); assertEquals("SIBLINGDIRFILE", configData.data().get(siblingDirFileName)); assertNull(configData.ttl()); } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java index a6b2e7f65d8e9..3f71a45628f26 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java @@ -26,7 +26,7 @@ import javax.management.ObjectName; import java.lang.management.ManagementFactory; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -164,7 +164,7 @@ public void testJmxPrefix() throws Exception { JmxReporter reporter = new JmxReporter(); MetricsContext metricsContext = new KafkaMetricsContext("kafka.server"); MetricConfig metricConfig = new MetricConfig(); - Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Arrays.asList(reporter)), Time.SYSTEM, metricsContext); + Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Collections.singletonList(reporter)), Time.SYSTEM, metricsContext); MBeanServer server = ManagementFactory.getPlatformMBeanServer(); try { @@ -183,7 +183,7 @@ public void testDeprecatedJmxPrefixWithDefaultMetrics() throws Exception { // for backwards compatibility, ensure prefix does not get overridden by the default empty namespace in metricscontext 
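// Illustrative aside, not part of the patch: two string cleanups above rely on Java's
// concatenation rules. In testBadInputs, Long.MAX_VALUE + "00" converts the long via
// String.valueOf, so it equals Long.toString(Long.MAX_VALUE) + "00"; and the leading
// "" + removed from toRst/toEnrichedRst contributes nothing to the literal chain. Sketch:
class ConcatSketch {
    public static void main(String[] args) {
        String explicit = Long.toString(Long.MAX_VALUE) + "00";
        String implicit = Long.MAX_VALUE + "00"; // long operand is stringified automatically
        System.out.println(explicit.equals(implicit)); // true: "922337203685477580700"

        String padded = "" + "``opt2``\n"; // the empty literal is dead weight
        System.out.println(padded.equals("``opt2``\n")); // true
    }
}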
MetricConfig metricConfig = new MetricConfig(); - Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Arrays.asList(reporter)), Time.SYSTEM); + Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Collections.singletonList(reporter)), Time.SYSTEM); MBeanServer server = ManagementFactory.getPlatformMBeanServer(); try { diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java index f514281abf314..0713f0ac30a3f 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java @@ -77,7 +77,7 @@ public class MetricsTest { @BeforeEach public void setup() { - this.metrics = new Metrics(config, Arrays.asList(new JmxReporter()), time, true); + this.metrics = new Metrics(config, singletonList(new JmxReporter()), time, true); } @AfterEach @@ -197,7 +197,7 @@ public void testHierarchicalSensors() { assertEquals(1.0 + c1, p2, EPS); assertEquals(1.0 + c1 + c2, p1, EPS); assertEquals(Arrays.asList(child1, child2), metrics.childrenSensors().get(parent1)); - assertEquals(Arrays.asList(child1), metrics.childrenSensors().get(parent2)); + assertEquals(singletonList(child1), metrics.childrenSensors().get(parent2)); assertNull(metrics.childrenSensors().get(grandchild)); } @@ -719,7 +719,7 @@ public void testMetricInstances() { Map childTagsWithValues = new HashMap<>(); childTagsWithValues.put("child-tag", "child-tag-value"); - try (Metrics inherited = new Metrics(new MetricConfig().tags(parentTagsWithValues), Arrays.asList(new JmxReporter()), time, true)) { + try (Metrics inherited = new Metrics(new MetricConfig().tags(parentTagsWithValues), singletonList(new JmxReporter()), time, true)) { MetricName inheritedMetric = inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, childTagsWithValues); Map filledOutTags = inheritedMetric.tags(); @@ -787,7 +787,7 @@ public void testConcurrentReadUpdate() { public void testConcurrentReadUpdateReport() { class LockingReporter implements MetricsReporter { - Map activeMetrics = new HashMap<>(); + final Map activeMetrics = new HashMap<>(); @Override public synchronized void init(List metrics) { } @@ -819,7 +819,7 @@ synchronized void processMetrics() { final LockingReporter reporter = new LockingReporter(); this.metrics.close(); - this.metrics = new Metrics(config, Arrays.asList(reporter), new MockTime(10), true); + this.metrics = new Metrics(config, singletonList(reporter), new MockTime(10), true); final Deque sensors = new ConcurrentLinkedDeque<>(); SensorCreator sensorCreator = new SensorCreator(metrics); diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java index 9254616528fe7..df3eedd176ad9 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java @@ -29,7 +29,6 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -128,7 +127,7 @@ public void testShouldRecordForTraceLevelSensor() { public void testExpiredSensor() { MetricConfig config = new MetricConfig(); Time mockTime = new MockTime(); - try (Metrics metrics = new Metrics(config, Arrays.asList(new JmxReporter()), mockTime, true)) { + try (Metrics metrics = new Metrics(config, 
Collections.singletonList(new JmxReporter()), mockTime, true)) { long inactiveSensorExpirationTimeSeconds = 60L; Sensor sensor = new Sensor(metrics, "sensor", null, config, mockTime, inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel.INFO); diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java index 344ade22a66ed..c46691542349e 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/stats/FrequenciesTest.java @@ -29,7 +29,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -46,7 +45,7 @@ public class FrequenciesTest { public void setup() { config = new MetricConfig().eventWindow(50).samples(2); time = new MockTime(); - metrics = new Metrics(config, Arrays.asList(new JmxReporter()), time, true); + metrics = new Metrics(config, Collections.singletonList(new JmxReporter()), time, true); } @AfterEach diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 25a240c2ede18..a46a38af30c13 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -955,7 +955,7 @@ public void testWriteCompletesSendWithNoBytesWritten() throws IOException { NetworkSend send = new NetworkSend("destination", new ByteBufferSend(ByteBuffer.allocate(0))); when(channel.maybeCompleteSend()).thenReturn(send); selector.write(channel); - assertEquals(asList(send), selector.completedSends()); + assertEquals(Collections.singletonList(send), selector.completedSends()); } /** diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java index aeb37af931369..e56ce7abbad0b 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java @@ -55,7 +55,6 @@ import java.nio.channels.SocketChannel; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -105,7 +104,7 @@ private static class Args { private CertStores clientCertStores; private Map sslClientConfigs; private Map sslServerConfigs; - private Map sslConfigOverrides; + private final Map sslConfigOverrides; public Args(String tlsProtocol, boolean useInlinePem) throws Exception { this.tlsProtocol = tlsProtocol; @@ -621,7 +620,7 @@ public void testTlsDefaults(Args args) throws Exception { /** Checks connection failed using the specified {@code tlsVersion}. 
*/ private void checkAuthenticationFailed(Args args, String node, String tlsVersion) throws IOException { - args.sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Arrays.asList(tlsVersion)); + args.sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList(tlsVersion)); createSelector(args.sslClientConfigs); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); @@ -640,10 +639,10 @@ public void testUnsupportedCiphers(Args args) throws Exception { SSLContext context = SSLContext.getInstance(args.tlsProtocol); context.init(null, null, null); String[] cipherSuites = context.getDefaultSSLParameters().getCipherSuites(); - args.sslServerConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Arrays.asList(cipherSuites[0])); + args.sslServerConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Collections.singletonList(cipherSuites[0])); server = createEchoServer(args, SecurityProtocol.SSL); - args.sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Arrays.asList(cipherSuites[1])); + args.sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, Collections.singletonList(cipherSuites[1])); createSelector(args.sslClientConfigs); checkAuthenticationFailed(args, "1", args.tlsProtocol); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java index 4f6e4b3aced70..425ab23532bb8 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java @@ -139,7 +139,7 @@ public void testCiphersSuiteForTls12() throws Exception { /** Checks connection failed using the specified {@code tlsVersion}. 
*/ private void checkAuthenticationFailed() throws IOException, InterruptedException { - sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Arrays.asList("TLSv1.3")); + sslClientConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList("TLSv1.3")); createSelector(sslClientConfigs); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); selector.connect("0", addr, BUFFER_SIZE, BUFFER_SIZE); diff --git a/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java index 7169b2ec51706..750f75f50753c 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/Tls12SelectorTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.network; -import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; @@ -25,6 +24,7 @@ import java.net.InetSocketAddress; import java.security.GeneralSecurityException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.kafka.common.config.SslConfigs; @@ -38,7 +38,7 @@ protected Map createSslClientConfigs(File trustStoreFile) throws GeneralSecurityException, IOException { Map configs = TestSslUtils.createSslConfig(false, false, Mode.CLIENT, trustStoreFile, "client"); - configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, asList("TLSv1.2")); + configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList("TLSv1.2")); return configs; } diff --git a/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java index db69c2fa8ea1e..2313ec4748e96 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java @@ -25,6 +25,7 @@ import java.net.InetSocketAddress; import java.security.GeneralSecurityException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -42,7 +43,7 @@ public class Tls13SelectorTest extends SslSelectorTest { protected Map createSslClientConfigs(File trustStoreFile) throws GeneralSecurityException, IOException { Map configs = TestSslUtils.createSslConfig(false, false, Mode.CLIENT, trustStoreFile, "client"); - configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, asList("TLSv1.3")); + configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList("TLSv1.3")); return configs; } diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java index 4795798908ff8..b3181907b52a4 100755 --- a/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/MessageUtilTest.java @@ -46,7 +46,7 @@ public void testDeepToString() { assertEquals("[1, 2, 3]", MessageUtil.deepToString(Arrays.asList(1, 2, 3).iterator())); assertEquals("[foo]", - MessageUtil.deepToString(Arrays.asList("foo").iterator())); + MessageUtil.deepToString(Collections.singletonList("foo").iterator())); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java 
b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java index e279e952e7eaa..a3020e9cb0722 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java @@ -756,7 +756,7 @@ public void testBuffersDereferencedOnClose(Args args) { // Ignore memory usage during initialization if (iterations == 2) startMem = memUsed; - else if (iterations > 2 && memUsed < (iterations - 2) * 1024) + else if (iterations > 2 && memUsed < (iterations - 2) * 1024L) break; } assertTrue(iterations < 100, "Memory usage too high: " + memUsed); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java index c18926dc57d20..3251515492a34 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequestTest.java @@ -70,13 +70,13 @@ public void testPartitionDir() { .setPartitions(asList(0, 1)), new AlterReplicaLogDirTopic() .setName("topic2") - .setPartitions(asList(7))).iterator())), + .setPartitions(singletonList(7))).iterator())), new AlterReplicaLogDir() .setPath("/data1") .setTopics(new AlterReplicaLogDirTopicCollection( - asList(new AlterReplicaLogDirTopic() + singletonList(new AlterReplicaLogDirTopic() .setName("topic3") - .setPartitions(asList(12))).iterator()))).iterator())); + .setPartitions(singletonList(12))).iterator()))).iterator())); AlterReplicaLogDirsRequest request = new AlterReplicaLogDirsRequest.Builder(data).build(); Map expect = new HashMap<>(); expect.put(new TopicPartition("topic", 0), "/data0"); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java index edc441cf9c828..347a8c26bc067 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponseTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.common.requests; +import java.util.Collections; import java.util.Map; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; @@ -44,7 +45,7 @@ public void testErrorCounts() { .setErrorCode(Errors.NONE.code()))), new AlterReplicaLogDirTopicResult() .setTopicName("t1") - .setPartitions(asList( + .setPartitions(Collections.singletonList( new AlterReplicaLogDirPartitionResult() .setPartitionIndex(0) .setErrorCode(Errors.LOG_DIR_NOT_FOUND.code()))))); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java index 3baf3af3f26ea..339ef9be4a5df 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java @@ -80,7 +80,7 @@ public class DeleteAclsResponseTest { private static final DeleteAclsFilterResult PREFIXED_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(asList( LITERAL_ACL1, PREFIXED_ACL1)); - private static final DeleteAclsFilterResult UNKNOWN_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(asList( + private static final DeleteAclsFilterResult 
UNKNOWN_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(singletonList( UNKNOWN_ACL)); @Test diff --git a/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java index 83c33e4903fba..ce3e1548d31d8 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/LeaderAndIsrRequestTest.java @@ -118,8 +118,8 @@ public void testVersionLogic() { .setIsr(asList(0, 1)) .setPartitionEpoch(10) .setReplicas(asList(0, 1, 2)) - .setAddingReplicas(asList(3)) - .setRemovingReplicas(asList(2)), + .setAddingReplicas(Collections.singletonList(3)) + .setRemovingReplicas(Collections.singletonList(2)), new LeaderAndIsrPartitionState() .setTopicName("topic0") .setPartitionIndex(1) diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java index 83c4b101d8969..81b8b8090e56f 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java @@ -60,12 +60,12 @@ public void testDuplicatePartitions() { @Test public void testGetErrorResponse() { for (short version = 1; version <= ApiKeys.LIST_OFFSETS.latestVersion(); version++) { - List topics = Arrays.asList( + List topics = Collections.singletonList( new ListOffsetsTopic() - .setName("topic") - .setPartitions(Collections.singletonList( - new ListOffsetsPartition() - .setPartitionIndex(0)))); + .setName("topic") + .setPartitions(Collections.singletonList( + new ListOffsetsPartition() + .setPartitionIndex(0)))); ListOffsetsRequest request = ListOffsetsRequest.Builder .forConsumer(true, IsolationLevel.READ_COMMITTED, false) .setTargetTimes(topics) @@ -93,12 +93,12 @@ public void testGetErrorResponse() { @Test public void testGetErrorResponseV0() { - List topics = Arrays.asList( + List topics = Collections.singletonList( new ListOffsetsTopic() - .setName("topic") - .setPartitions(Collections.singletonList( - new ListOffsetsPartition() - .setPartitionIndex(0)))); + .setName("topic") + .setPartitions(Collections.singletonList( + new ListOffsetsPartition() + .setPartitionIndex(0)))); ListOffsetsRequest request = ListOffsetsRequest.Builder .forConsumer(true, IsolationLevel.READ_UNCOMMITTED, false) .setTargetTimes(topics) diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index b1fdf35d8e383..8c8099942fd42 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -1197,25 +1197,25 @@ private AssignReplicasToDirsRequest createAssignReplicasToDirsRequest(short vers .setDirectories(Arrays.asList( new AssignReplicasToDirsRequestData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(Arrays.asList( + .setTopics(singletonList( new AssignReplicasToDirsRequestData.TopicData() .setTopicId(Uuid.fromString("qo0Pcp70TdGnAa7YKMKCqw")) - .setPartitions(Arrays.asList( + .setPartitions(singletonList( new AssignReplicasToDirsRequestData.PartitionData() .setPartitionIndex(8) )) )), new AssignReplicasToDirsRequestData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(Arrays.asList( + 
.setTopics(singletonList( new AssignReplicasToDirsRequestData.TopicData() .setTopicId(Uuid.fromString("yEu11V7HTRGIwm6FDWFhzg")) - .setPartitions(Arrays.asList( + .setPartitions(asList( new AssignReplicasToDirsRequestData.PartitionData() .setPartitionIndex(2), new AssignReplicasToDirsRequestData.PartitionData() .setPartitionIndex(80) - )) + )) )) )); return new AssignReplicasToDirsRequest.Builder(data).build(version); @@ -1228,10 +1228,10 @@ private AssignReplicasToDirsResponse createAssignReplicasToDirsResponse() { .setDirectories(Arrays.asList( new AssignReplicasToDirsResponseData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(Arrays.asList( + .setTopics(singletonList( new AssignReplicasToDirsResponseData.TopicData() .setTopicId(Uuid.fromString("sKhZV8LnTA275KvByB9bVg")) - .setPartitions(Arrays.asList( + .setPartitions(singletonList( new AssignReplicasToDirsResponseData.PartitionData() .setPartitionIndex(8) .setErrorCode(Errors.NONE.code()) @@ -1239,10 +1239,10 @@ private AssignReplicasToDirsResponse createAssignReplicasToDirsResponse() { )), new AssignReplicasToDirsResponseData.DirectoryData() .setId(Uuid.randomUuid()) - .setTopics(Arrays.asList( + .setTopics(singletonList( new AssignReplicasToDirsResponseData.TopicData() .setTopicId(Uuid.fromString("ORLP5NEzRo64SvKq1hIVQg")) - .setPartitions(Arrays.asList( + .setPartitions(asList( new AssignReplicasToDirsResponseData.PartitionData() .setPartitionIndex(2) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()), @@ -1257,7 +1257,7 @@ private AssignReplicasToDirsResponse createAssignReplicasToDirsResponse() { private DescribeTopicPartitionsRequest createDescribeTopicPartitionsRequest(short version) { DescribeTopicPartitionsRequestData data = new DescribeTopicPartitionsRequestData() - .setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName("foo"))) + .setTopics(singletonList(new DescribeTopicPartitionsRequestData.TopicRequest().setName("foo"))) .setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName("foo").setPartitionIndex(1)); return new DescribeTopicPartitionsRequest.Builder(data).build(version); } @@ -1272,13 +1272,13 @@ private DescribeTopicPartitionsResponse createDescribeTopicPartitionsResponse() .setIsInternal(false) .setName("foo") .setTopicAuthorizedOperations(0) - .setPartitions(Arrays.asList( + .setPartitions(singletonList( new DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponsePartition() .setErrorCode((short) 0) - .setIsrNodes(Arrays.asList(1)) + .setIsrNodes(singletonList(1)) .setPartitionIndex(1) .setLeaderId(1) - .setReplicaNodes(Arrays.asList(1)) + .setReplicaNodes(singletonList(1)) .setLeaderEpoch(0) )) ); @@ -1335,7 +1335,7 @@ private ControllerRegistrationRequest createControllerRegistrationRequest(short setIncarnationId(Uuid.fromString("qiTdnbu6RPazh1Aufq4dxw")). setZkMigrationReady(true). setFeatures(new ControllerRegistrationRequestData.FeatureCollection( - Arrays.asList( + singletonList( new ControllerRegistrationRequestData.Feature(). setName("metadata.version"). setMinSupportedVersion((short) 1). @@ -1343,7 +1343,7 @@ private ControllerRegistrationRequest createControllerRegistrationRequest(short ).iterator() )). setListeners(new ControllerRegistrationRequestData.ListenerCollection( - Arrays.asList( + singletonList( new ControllerRegistrationRequestData.Listener(). setName("CONTROLLER"). setName("localhost"). 
@@ -3542,7 +3542,7 @@ private BrokerRegistrationRequest createBrokerRegistrationRequest(short v) { .setListeners(new BrokerRegistrationRequestData.ListenerCollection(singletonList( new BrokerRegistrationRequestData.Listener()).iterator())) .setIncarnationId(Uuid.randomUuid()) - .setLogDirs(Arrays.asList(Uuid.fromString("qaJjNJ05Q36kEgeTBDcj0Q"))) + .setLogDirs(singletonList(Uuid.fromString("qaJjNJ05Q36kEgeTBDcj0Q"))) .setPreviousBrokerEpoch(123L); return new BrokerRegistrationRequest.Builder(data).build(v); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java index 2dd17f776ec95..a7d8df35871a2 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/UpdateMetadataRequestTest.java @@ -92,7 +92,7 @@ public void testVersionLogic() { .setIsr(asList(0, 1)) .setZkVersion(10) .setReplicas(asList(0, 1, 2)) - .setOfflineReplicas(asList(2)), + .setOfflineReplicas(Collections.singletonList(2)), new UpdateMetadataPartitionState() .setTopicName(topic0) .setPartitionIndex(1) @@ -143,12 +143,12 @@ public void testVersionLogic() { .setEndpoints(broker0Endpoints), new UpdateMetadataBroker() .setId(1) - .setEndpoints(asList( - new UpdateMetadataEndpoint() - .setHost("host1") - .setPort(9090) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener("PLAINTEXT") + .setEndpoints(Collections.singletonList( + new UpdateMetadataEndpoint() + .setHost("host1") + .setPort(9090) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setListener("PLAINTEXT") )) ); diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java index d705c75ab3fe9..a0e22ee150552 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java @@ -43,7 +43,6 @@ import org.junit.jupiter.api.Test; import java.time.Duration; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -65,13 +64,13 @@ public void setup() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; saslServerConfigs = new HashMap<>(); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); saslClientConfigs = new HashMap<>(); saslClientConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT"); saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - TestJaasConfig testJaasConfig = TestJaasConfig.createConfiguration("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig testJaasConfig = TestJaasConfig.createConfiguration("PLAIN", Collections.singletonList("PLAIN")); testJaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "anotherpassword"); server = createEchoServer(securityProtocol); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java index 
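// Illustrative aside, not part of the patch: the MemoryRecordsBuilderTest hunk further
// up appends L to 1024 so (iterations - 2) * 1024L multiplies in long arithmetic. In
// that test iterations stays below 100, so this is a defensive, lint-level fix; with
// int operands and a large count the product would wrap before the comparison.
// Exaggerated sketch of the failure mode being guarded against:
class IntOverflowSketch {
    public static void main(String[] args) {
        int iterations = 3_000_000;
        long wrapped = (iterations - 2) * 1024;  // int multiply overflows, then widens
        long correct = (iterations - 2) * 1024L; // widened to long before multiplying
        System.out.println(wrapped); // -1222969344 after wrap-around
        System.out.println(correct); // 3071997952
    }
}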
477c6283bb498..87c8b456bcd07 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java @@ -39,7 +39,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -93,7 +92,7 @@ public void teardown() throws Exception { public void testInvalidPasswordSaslPlain() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); jaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "invalidpassword"); server = createEchoServer(securityProtocol); @@ -141,7 +140,7 @@ public void testDisabledSaslMechanism() throws Exception { public void testClientConnectionClose() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); jaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "invalidpassword"); server = createEchoServer(securityProtocol); diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java index 7063bbe698fff..1ab864f912266 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java @@ -191,7 +191,7 @@ public void teardown() throws Exception { public void testValidSaslPlainOverSsl() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); checkAuthenticationAndReauthentication(securityProtocol, node); @@ -205,7 +205,7 @@ public void testValidSaslPlainOverSsl() throws Exception { public void testValidSaslPlainOverPlaintext() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); checkAuthenticationAndReauthentication(securityProtocol, node); @@ -264,7 +264,7 @@ public void testSaslAuthenticationMaxReceiveSize() throws Exception { public void testInvalidPasswordSaslPlain() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); jaasConfig.setClientOptions("PLAIN", TestJaasConfig.USERNAME, "invalidpassword"); server = createEchoServer(securityProtocol); @@ -281,7 +281,7 @@ public void testInvalidPasswordSaslPlain() throws Exception { public void testInvalidUsernameSaslPlain() throws Exception { String node = "0"; 
SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); jaasConfig.setClientOptions("PLAIN", "invaliduser", TestJaasConfig.PASSWORD); server = createEchoServer(securityProtocol); @@ -297,7 +297,7 @@ public void testInvalidUsernameSaslPlain() throws Exception { @Test public void testMissingUsernameSaslPlain() throws Exception { String node = "0"; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); jaasConfig.setClientOptions("PLAIN", null, "mypassword"); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; @@ -321,7 +321,7 @@ public void testMissingUsernameSaslPlain() throws Exception { @Test public void testMissingPasswordSaslPlain() throws Exception { String node = "0"; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); jaasConfig.setClientOptions("PLAIN", "myuser", null); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; @@ -412,7 +412,7 @@ static void reset() { public void testMechanismPluggability() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("DIGEST-MD5", Arrays.asList("DIGEST-MD5")); + configureMechanisms("DIGEST-MD5", Collections.singletonList("DIGEST-MD5")); configureDigestMd5ServerCallback(securityProtocol); server = createEchoServer(securityProtocol); @@ -486,7 +486,7 @@ public void testMultipleServerMechanisms() throws Exception { @Test public void testValidSaslScramSha256() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); + configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); server = createEchoServer(securityProtocol); updateScramCredentialCache(TestJaasConfig.USERNAME, TestJaasConfig.PASSWORD); @@ -516,7 +516,7 @@ public void testValidSaslScramMechanisms() throws Exception { @Test public void testInvalidPasswordSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); Map options = new HashMap<>(); options.put("username", TestJaasConfig.USERNAME); options.put("password", "invalidpassword"); @@ -536,7 +536,7 @@ public void testInvalidPasswordSaslScram() throws Exception { @Test public void testUnknownUserSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); Map options = new HashMap<>(); options.put("username", "unknownUser"); options.put("password", TestJaasConfig.PASSWORD); @@ -582,7 +582,7 @@ public void testScramUsernameWithSpecialCharacters() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; String username = "special user= test,scram"; String password = username + "-password"; - 
TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); Map options = new HashMap<>(); options.put("username", username); options.put("password", password); @@ -597,7 +597,7 @@ public void testScramUsernameWithSpecialCharacters() throws Exception { @Test public void testTokenAuthenticationOverSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); //create jaas config for token auth Map options = new HashMap<>(); @@ -633,7 +633,7 @@ public void testTokenAuthenticationOverSaslScram() throws Exception { @Test public void testTokenReauthenticationOverSaslScram() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Arrays.asList("SCRAM-SHA-256")); + TestJaasConfig jaasConfig = configureMechanisms("SCRAM-SHA-256", Collections.singletonList("SCRAM-SHA-256")); // create jaas config for token auth Map options = new HashMap<>(); @@ -745,7 +745,7 @@ public void testUnauthenticatedApiVersionsRequestOverSslHandshakeVersion1() thro public void testApiVersionsRequestWithServerUnsupportedVersion() throws Exception { short handshakeVersion = ApiKeys.SASL_HANDSHAKE.latestVersion(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send ApiVersionsRequest with unsupported version and validate error response. @@ -787,7 +787,7 @@ public void testApiVersionsRequestWithServerUnsupportedVersion() throws Exceptio */ @Test public void testSaslUnsupportedClientVersions() throws Exception { - configureMechanisms("SCRAM-SHA-512", Arrays.asList("SCRAM-SHA-512")); + configureMechanisms("SCRAM-SHA-512", Collections.singletonList("SCRAM-SHA-512")); server = startServerApiVersionsUnsupportedByClient(SecurityProtocol.SASL_SSL, "SCRAM-SHA-512"); updateScramCredentialCache(TestJaasConfig.USERNAME, TestJaasConfig.PASSWORD); @@ -806,7 +806,7 @@ public void testSaslUnsupportedClientVersions() throws Exception { public void testInvalidApiVersionsRequest() throws Exception { short handshakeVersion = ApiKeys.SASL_HANDSHAKE.latestVersion(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send ApiVersionsRequest with invalid version and validate error response. @@ -848,7 +848,7 @@ public void testForBrokenSaslHandshakeVersionBump() { public void testValidApiVersionsRequest() throws Exception { short handshakeVersion = ApiKeys.SASL_HANDSHAKE.latestVersion(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send ApiVersionsRequest with valid version and validate error response. 
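One behavioral difference worth keeping in mind for the configureMechanisms hunks above: Arrays.asList is fixed-size but element-mutable (set() works), whereas Collections.singletonList rejects even set(). The substitution is safe here only because these mechanism lists are never mutated after construction. A short sketch with hypothetical names:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SingletonListMutationDemo {
    public static void main(String[] args) {
        List<String> fixedSize = Arrays.asList("PLAIN");
        fixedSize.set(0, "SCRAM-SHA-256"); // allowed: fixed-size, but elements are writable

        List<String> single = Collections.singletonList("PLAIN");
        try {
            single.set(0, "SCRAM-SHA-256"); // singletonList is fully immutable
        } catch (UnsupportedOperationException e) {
            System.out.println("set() rejected on singletonList");
        }
    }
}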
@@ -878,7 +878,7 @@ public void testValidApiVersionsRequest() throws Exception { @Test public void testSaslHandshakeRequestWithUnsupportedVersion() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send SaslHandshakeRequest and validate that connection is closed by server. @@ -905,7 +905,7 @@ public void testSaslHandshakeRequestWithUnsupportedVersion() throws Exception { @Test public void testInvalidSaslPacket() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send invalid SASL packet after valid handshake request @@ -944,7 +944,7 @@ public void testInvalidSaslPacket() throws Exception { @Test public void testInvalidApiVersionsRequestSequence() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send handshake request followed by ApiVersionsRequest @@ -970,7 +970,7 @@ public void testInvalidApiVersionsRequestSequence() throws Exception { @Test public void testPacketSizeTooBig() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send SASL packet with large size after valid handshake request @@ -1010,7 +1010,7 @@ public void testPacketSizeTooBig() throws Exception { @Test public void testDisallowedKafkaRequestsBeforeAuthentication() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Send metadata request before Kafka SASL handshake request @@ -1047,7 +1047,7 @@ public void testDisallowedKafkaRequestsBeforeAuthentication() throws Exception { */ @Test public void testInvalidLoginModule() throws Exception { - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); jaasConfig.createOrUpdateEntry(TestJaasConfig.LOGIN_CONTEXT_CLIENT, "InvalidLoginModule", TestJaasConfig.defaultClientOptions()); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; @@ -1270,7 +1270,7 @@ public void testServerLoginCallbackOverride() throws Exception { public void testDisabledMechanism() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("PLAIN", Arrays.asList("DIGEST-MD5")); + configureMechanisms("PLAIN", Collections.singletonList("DIGEST-MD5")); server = createEchoServer(securityProtocol); createAndCheckClientConnectionFailure(securityProtocol, node); @@ -1285,7 +1285,7 @@ public void testDisabledMechanism() throws Exception { public void testInvalidMechanism() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - 
configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "INVALID"); server = createEchoServer(securityProtocol); @@ -1312,7 +1312,7 @@ public void testInvalidMechanism() throws Exception { public void testClientDynamicJaasConfiguration() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); Map serverOptions = new HashMap<>(); serverOptions.put("user_user1", "user1-secret"); serverOptions.put("user_user2", "user2-secret"); @@ -1359,7 +1359,7 @@ public void testClientDynamicJaasConfiguration() throws Exception { public void testServerDynamicJaasConfiguration() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); Map serverOptions = new HashMap<>(); serverOptions.put("user_user1", "user1-secret"); serverOptions.put("user_user2", "user2-secret"); @@ -1385,7 +1385,7 @@ public void testServerDynamicJaasConfiguration() throws Exception { public void testJaasConfigurationForListener() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("PLAIN")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("PLAIN")); TestJaasConfig staticJaasConfig = new TestJaasConfig(); @@ -1573,7 +1573,7 @@ public void oldSaslScramSslClientWithoutSaslAuthenticateHeaderFailure() throws E public void testValidSaslOauthBearerMechanism() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("OAUTHBEARER", Arrays.asList("OAUTHBEARER")); + configureMechanisms("OAUTHBEARER", Collections.singletonList("OAUTHBEARER")); server = createEchoServer(securityProtocol); createAndCheckClientConnection(securityProtocol, node); } @@ -1588,7 +1588,7 @@ public void testCannotReauthenticateWithDifferentPrincipal() throws Exception { saslClientConfigs.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, AlternateLoginCallbackHandler.class.getName()); configureMechanisms(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Arrays.asList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); + Collections.singletonList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); server = createEchoServer(securityProtocol); // initial authentication must succeed createClientConnection(securityProtocol, node); @@ -1701,7 +1701,7 @@ public void testCannotReauthenticateAgainFasterThanOneSecond() throws Exception time = new MockTime(); SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; configureMechanisms(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Arrays.asList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); + Collections.singletonList(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); server = createEchoServer(securityProtocol); try { 
createClientConnection(securityProtocol, node); @@ -1748,7 +1748,7 @@ public void testCannotReauthenticateAgainFasterThanOneSecond() throws Exception public void testRepeatedValidSaslPlainOverSsl() throws Exception { String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); /* * Make sure 85% of this value is at least 1 second otherwise it is possible for * the client to start re-authenticating but the server does not start due to @@ -1785,7 +1785,7 @@ public void testValidSaslOauthBearerMechanismWithoutServerTokens() throws Except String node = "0"; SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; saslClientConfigs.put(SaslConfigs.SASL_MECHANISM, "OAUTHBEARER"); - saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Arrays.asList("OAUTHBEARER")); + saslServerConfigs.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList("OAUTHBEARER")); saslClientConfigs.put(SaslConfigs.SASL_JAAS_CONFIG, TestJaasConfig.jaasConfigProperty("OAUTHBEARER", Collections.singletonMap("unsecuredLoginStringClaim_sub", TestJaasConfig.USERNAME))); saslServerConfigs.put("listener.name.sasl_ssl.oauthbearer." + SaslConfigs.SASL_JAAS_CONFIG, @@ -1818,7 +1818,7 @@ public void testValidSaslOauthBearerMechanismWithoutServerTokens() throws Except @Test public void testInsufficientScopeSaslOauthBearerMechanism() throws Exception { SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - TestJaasConfig jaasConfig = configureMechanisms("OAUTHBEARER", Arrays.asList("OAUTHBEARER")); + TestJaasConfig jaasConfig = configureMechanisms("OAUTHBEARER", Collections.singletonList("OAUTHBEARER")); // now update the server side to require a scope the client does not provide Map serverJaasConfigOptionsMap = TestJaasConfig.defaultServerOptions("OAUTHBEARER"); serverJaasConfigOptionsMap.put("unsecuredValidatorRequiredScope", "LOGIN_TO_KAFKA"); // causes the failure @@ -1900,7 +1900,7 @@ private void removeClientSslKeystore() { private void verifySaslAuthenticateHeaderInterop(boolean enableHeaderOnServer, boolean enableHeaderOnClient, SecurityProtocol securityProtocol, String saslMechanism) throws Exception { - configureMechanisms(saslMechanism, Arrays.asList(saslMechanism)); + configureMechanisms(saslMechanism, Collections.singletonList(saslMechanism)); createServer(securityProtocol, saslMechanism, enableHeaderOnServer); String node = "0"; @@ -1910,7 +1910,7 @@ private void verifySaslAuthenticateHeaderInterop(boolean enableHeaderOnServer, b private void verifySaslAuthenticateHeaderInteropWithFailure(boolean enableHeaderOnServer, boolean enableHeaderOnClient, SecurityProtocol securityProtocol, String saslMechanism) throws Exception { - TestJaasConfig jaasConfig = configureMechanisms(saslMechanism, Arrays.asList(saslMechanism)); + TestJaasConfig jaasConfig = configureMechanisms(saslMechanism, Collections.singletonList(saslMechanism)); jaasConfig.setClientOptions(saslMechanism, TestJaasConfig.USERNAME, "invalidpassword"); createServer(securityProtocol, saslMechanism, enableHeaderOnServer); @@ -1947,7 +1947,7 @@ private NioEchoServer startServerApiVersionsUnsupportedByClient(final SecurityPr boolean isScram = ScramMechanism.isScram(saslMechanism); if (isScram) - ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism)); + ScramCredentialUtils.createCache(credentialCache, 
Collections.singletonList(saslMechanism)); Supplier apiVersionSupplier = () -> { ApiVersionCollection versionCollection = new ApiVersionCollection(2); @@ -1976,7 +1976,7 @@ private NioEchoServer startServerWithoutSaslAuthenticateHeader(final SecurityPro boolean isScram = ScramMechanism.isScram(saslMechanism); if (isScram) - ScramCredentialUtils.createCache(credentialCache, Arrays.asList(saslMechanism)); + ScramCredentialUtils.createCache(credentialCache, Collections.singletonList(saslMechanism)); Supplier apiVersionSupplier = () -> { ApiVersionsResponse defaultApiVersionResponse = TestUtils.defaultApiVersionsResponse( @@ -2090,7 +2090,7 @@ protected void setSaslAuthenticateAndHandshakeVersions(ApiVersionsResponse apiVe * */ private void testUnauthenticatedApiVersionsRequest(SecurityProtocol securityProtocol, short saslHandshakeVersion) throws Exception { - configureMechanisms("PLAIN", Arrays.asList("PLAIN")); + configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); server = createEchoServer(securityProtocol); // Create non-SASL connection to manually authenticate after ApiVersionsRequest @@ -2499,44 +2499,43 @@ public static class AlternateLoginCallbackHandler implements AuthenticateCallbac public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { DELEGATE.handle(callbacks); // now change any returned token to have a different principal name - if (callbacks.length > 0) - for (Callback callback : callbacks) { - if (callback instanceof OAuthBearerTokenCallback) { - OAuthBearerTokenCallback oauthBearerTokenCallback = (OAuthBearerTokenCallback) callback; - OAuthBearerToken token = oauthBearerTokenCallback.token(); - if (token != null) { - String changedPrincipalNameToUse = token.principalName() - + String.valueOf(++numInvocations); - String headerJson = "{" + claimOrHeaderJsonText("alg", "none") + "}"; - /* - * Use a short lifetime so the background refresh thread replaces it before we - * re-authenticate - */ - String lifetimeSecondsValueToUse = "1"; - String claimsJson; - try { - claimsJson = String.format("{%s,%s,%s}", - expClaimText(Long.parseLong(lifetimeSecondsValueToUse)), - claimOrHeaderJsonText("iat", time.milliseconds() / 1000.0), - claimOrHeaderJsonText("sub", changedPrincipalNameToUse)); - } catch (NumberFormatException e) { - throw new OAuthBearerConfigException(e.getMessage()); - } - try { - Encoder urlEncoderNoPadding = Base64.getUrlEncoder().withoutPadding(); - OAuthBearerUnsecuredJws jws = new OAuthBearerUnsecuredJws(String.format("%s.%s.", - urlEncoderNoPadding.encodeToString(headerJson.getBytes(StandardCharsets.UTF_8)), - urlEncoderNoPadding - .encodeToString(claimsJson.getBytes(StandardCharsets.UTF_8))), - "sub", "scope"); - oauthBearerTokenCallback.token(jws); - } catch (OAuthBearerIllegalTokenException e) { - // occurs if the principal claim doesn't exist or has an empty value - throw new OAuthBearerConfigException(e.getMessage(), e); - } + for (Callback callback : callbacks) { + if (callback instanceof OAuthBearerTokenCallback) { + OAuthBearerTokenCallback oauthBearerTokenCallback = (OAuthBearerTokenCallback) callback; + OAuthBearerToken token = oauthBearerTokenCallback.token(); + if (token != null) { + String changedPrincipalNameToUse = token.principalName() + + ++numInvocations; + String headerJson = "{" + claimOrHeaderJsonText("alg", "none") + "}"; + /* + * Use a short lifetime so the background refresh thread replaces it before we + * re-authenticate + */ + String lifetimeSecondsValueToUse = "1"; + String claimsJson; + try 
{ + claimsJson = String.format("{%s,%s,%s}", + expClaimText(Long.parseLong(lifetimeSecondsValueToUse)), + claimOrHeaderJsonText("iat", time.milliseconds() / 1000.0), + claimOrHeaderJsonText("sub", changedPrincipalNameToUse)); + } catch (NumberFormatException e) { + throw new OAuthBearerConfigException(e.getMessage()); + } + try { + Encoder urlEncoderNoPadding = Base64.getUrlEncoder().withoutPadding(); + OAuthBearerUnsecuredJws jws = new OAuthBearerUnsecuredJws(String.format("%s.%s.", + urlEncoderNoPadding.encodeToString(headerJson.getBytes(StandardCharsets.UTF_8)), + urlEncoderNoPadding + .encodeToString(claimsJson.getBytes(StandardCharsets.UTF_8))), + "sub", "scope"); + oauthBearerTokenCallback.token(jws); + } catch (OAuthBearerIllegalTokenException e) { + // occurs if the principal claim doesn't exist or has an empty value + throw new OAuthBearerConfigException(e.getMessage(), e); } } } + } } private static String claimOrHeaderJsonText(String claimName, String claimValue) { diff --git a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java index a6e8f9714dc27..209c11e0a8da7 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosNameTest.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -117,18 +118,18 @@ public void testToUpperCase() throws Exception { @Test public void testInvalidRules() { - testInvalidRule(Arrays.asList("default")); - testInvalidRule(Arrays.asList("DEFAUL")); - testInvalidRule(Arrays.asList("DEFAULT/L")); - testInvalidRule(Arrays.asList("DEFAULT/g")); - - testInvalidRule(Arrays.asList("rule:[1:$1]")); - testInvalidRule(Arrays.asList("rule:[1:$1]/L/U")); - testInvalidRule(Arrays.asList("rule:[1:$1]/U/L")); - testInvalidRule(Arrays.asList("rule:[1:$1]/LU")); - testInvalidRule(Arrays.asList("RULE:[1:$1/L")); - testInvalidRule(Arrays.asList("RULE:[1:$1]/l")); - testInvalidRule(Arrays.asList("RULE:[2:$1](ABC.*)s/ABC/XYZ/L/g")); + testInvalidRule(Collections.singletonList("default")); + testInvalidRule(Collections.singletonList("DEFAUL")); + testInvalidRule(Collections.singletonList("DEFAULT/L")); + testInvalidRule(Collections.singletonList("DEFAULT/g")); + + testInvalidRule(Collections.singletonList("rule:[1:$1]")); + testInvalidRule(Collections.singletonList("rule:[1:$1]/L/U")); + testInvalidRule(Collections.singletonList("rule:[1:$1]/U/L")); + testInvalidRule(Collections.singletonList("rule:[1:$1]/LU")); + testInvalidRule(Collections.singletonList("RULE:[1:$1/L")); + testInvalidRule(Collections.singletonList("RULE:[1:$1]/l")); + testInvalidRule(Collections.singletonList("RULE:[2:$1](ABC.*)s/ABC/XYZ/L/g")); } private void testInvalidRule(List rules) { diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java index e29b7c069c984..4ad4e78b92c1f 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java +++ 
b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java @@ -143,7 +143,7 @@ private static OAuthBearerUnsecuredLoginCallbackHandler createCallbackHandler(Ma OAuthBearerUnsecuredLoginCallbackHandler callbackHandler = new OAuthBearerUnsecuredLoginCallbackHandler(); callbackHandler.time(mockTime); callbackHandler.configure(Collections.emptyMap(), OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Arrays.asList(config.getAppConfigurationEntry("KafkaClient")[0])); + Collections.singletonList(config.getAppConfigurationEntry("KafkaClient")[0])); return callbackHandler; } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java index d7d6013a45717..4c0d055012988 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java @@ -21,7 +21,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import java.nio.charset.StandardCharsets; -import java.util.Arrays; import java.util.Base64; import java.util.Base64.Encoder; import java.util.Collections; @@ -157,7 +156,7 @@ private static OAuthBearerUnsecuredValidatorCallbackHandler createCallbackHandle (Map) options); OAuthBearerUnsecuredValidatorCallbackHandler callbackHandler = new OAuthBearerUnsecuredValidatorCallbackHandler(); callbackHandler.configure(Collections.emptyMap(), OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Arrays.asList(config.getAppConfigurationEntry("KafkaClient")[0])); + Collections.singletonList(config.getAppConfigurationEntry("KafkaClient")[0])); return callbackHandler; } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java index ef8997a7bc7a9..c52ed6c4ec1cd 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerValidationUtilsTest.java @@ -161,14 +161,14 @@ public void validateScope() { long nowMs = TIME.milliseconds(); double nowClaimValue = ((double) nowMs) / 1000; final List noScope = Collections.emptyList(); - final List scope1 = Arrays.asList("scope1"); + final List scope1 = Collections.singletonList("scope1"); final List scope1And2 = Arrays.asList("scope1", "scope2"); for (boolean actualScopeExists : new boolean[] {true, false}) { - List scopes = !actualScopeExists ? Arrays.asList((List) null) + List scopes = !actualScopeExists ? Collections.singletonList((List) null) : Arrays.asList(noScope, scope1, scope1And2); for (List actualScope : scopes) { for (boolean requiredScopeExists : new boolean[] {true, false}) { - List requiredScopes = !requiredScopeExists ? Arrays.asList((List) null) + List requiredScopes = !requiredScopeExists ? 
Collections.singletonList((List) null) : Arrays.asList(noScope, scope1, scope1And2); for (List requiredScope : requiredScopes) { StringBuilder sb = new StringBuilder("{"); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java b/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java index fe02cbe5a96cc..2441c88779ad8 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java @@ -19,6 +19,7 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -31,9 +32,9 @@ public class FlattenedIteratorTest { @Test public void testNestedLists() { List> list = asList( - asList("foo", "a", "bc"), - asList("ddddd"), - asList("", "bar2", "baz45")); + asList("foo", "a", "bc"), + Collections.singletonList("ddddd"), + asList("", "bar2", "baz45")); Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); @@ -61,7 +62,7 @@ public void testEmptyList() { @Test public void testNestedSingleEmptyList() { - List> list = asList(emptyList()); + List> list = Collections.singletonList(emptyList()); Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); @@ -73,8 +74,8 @@ public void testNestedSingleEmptyList() { @Test public void testEmptyListFollowedByNonEmpty() { List> list = asList( - emptyList(), - asList("boo", "b", "de")); + emptyList(), + asList("boo", "b", "de")); Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); @@ -86,9 +87,9 @@ public void testEmptyListFollowedByNonEmpty() { @Test public void testEmptyListInBetweenNonEmpty() { List> list = asList( - asList("aadwdwdw"), - emptyList(), - asList("ee", "aa", "dd")); + Collections.singletonList("aadwdwdw"), + emptyList(), + asList("ee", "aa", "dd")); Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); @@ -100,9 +101,9 @@ public void testEmptyListInBetweenNonEmpty() { @Test public void testEmptyListAtTheEnd() { List> list = asList( - asList("ee", "dd"), - asList("e"), - emptyList()); + asList("ee", "dd"), + Collections.singletonList("e"), + emptyList()); Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), l -> l.iterator()); List flattened = new ArrayList<>(); diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java index b883843089955..8115675f5a532 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java @@ -88,7 +88,7 @@ public SchemaBuilder(Type type) { @Override public boolean isOptional() { - return optional == null ? 
false : optional; + return optional != null && optional; } /** diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java index 7b78c64af0ca7..2a29ea1ad7aa1 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java @@ -1125,9 +1125,7 @@ public boolean canDetect(Object value) { } if (knownType == null) { knownType = schema.type(); - } else if (knownType != schema.type()) { - return false; - } + } else return knownType == schema.type(); return true; } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java b/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java index b895ed398586d..efa56aca4692d 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java @@ -46,7 +46,7 @@ public void testReconfigureStopException() { private static class TestConnector extends Connector { - private boolean stopException; + private final boolean stopException; private int order = 0; public int stopOrder = -1; public int configureOrder = -1; diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java index 3e0c9de8d4c7b..32e304c218ac9 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java @@ -82,7 +82,7 @@ public void testNumericTypeProjection() { expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 32767.)); expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 327890.)); expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2)); - expectedProjected.put(values[5], Arrays.asList(1.2345)); + expectedProjected.put(values[5], Collections.singletonList(1.2345)); Object promoted; for (int i = 0; i < promotableSchemas.length; ++i) { diff --git a/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java b/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java index b6f96bd4bc15c..1972ff7a89d58 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java @@ -33,7 +33,7 @@ public class ConnectorUtilsTest { public void testGroupPartitions() { List> grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 1); - assertEquals(Arrays.asList(FIVE_ELEMENTS), grouped); + assertEquals(Collections.singletonList(FIVE_ELEMENTS), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 2); assertEquals(Arrays.asList(Arrays.asList(1, 2, 3), Arrays.asList(4, 5)), grouped); @@ -41,21 +41,21 @@ public void testGroupPartitions() { grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 3); assertEquals(Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4), - Arrays.asList(5)), grouped); + Collections.singletonList(5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 5); - assertEquals(Arrays.asList(Arrays.asList(1), - Arrays.asList(2), - Arrays.asList(3), - Arrays.asList(4), - Arrays.asList(5)), grouped); + 
assertEquals(Arrays.asList(Collections.singletonList(1), + Collections.singletonList(2), + Collections.singletonList(3), + Collections.singletonList(4), + Collections.singletonList(5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 7); - assertEquals(Arrays.asList(Arrays.asList(1), - Arrays.asList(2), - Arrays.asList(3), - Arrays.asList(4), - Arrays.asList(5), + assertEquals(Arrays.asList(Collections.singletonList(1), + Collections.singletonList(2), + Collections.singletonList(3), + Collections.singletonList(4), + Collections.singletonList(5), Collections.emptyList(), Collections.emptyList()), grouped); } diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java index 11106e5a179f9..23a28d8527c18 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java @@ -32,6 +32,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -60,7 +61,7 @@ public void testPutFlush() { // We do not call task.start() since it would override the output stream - task.put(Arrays.asList( + task.put(Collections.singletonList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L)); @@ -85,7 +86,7 @@ public void testStart() throws IOException { task.start(props); HashMap offsets = new HashMap<>(); - task.put(Arrays.asList( + task.put(Collections.singletonList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line0", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L)); diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java index 95dabf703c585..698f4fcf8d366 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java @@ -94,7 +94,7 @@ public void testStopResumeSavedOffset() throws Exception { // Append NUM_LINES more lines to the file try (PrintStream printStream = new PrintStream(Files.newOutputStream(sourceFile.toPath(), StandardOpenOption.APPEND))) { for (int i = NUM_LINES; i < 2 * NUM_LINES; i++) { - printStream.println(String.format(LINE_FORMAT, i)); + printStream.printf(LINE_FORMAT + "%n", i); } } @@ -197,7 +197,7 @@ private File createTempFile(int numLines) throws Exception { try (PrintStream printStream = new PrintStream(Files.newOutputStream(sourceFile.toPath()))) { for (int i = 0; i < numLines; i++) { - printStream.println(String.format(LINE_FORMAT, i)); + printStream.printf(LINE_FORMAT + "%n", i); } } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java index a5abeff40ce17..7d24a5f14db9c 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java +++ 
b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java @@ -26,7 +26,6 @@ import org.glassfish.hk2.utilities.binding.AbstractBinder; import org.glassfish.jersey.server.ResourceConfig; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -48,7 +47,7 @@ public void initializeInternalResources(Map herders) { @Override protected Collection> regularResources() { - return Arrays.asList( + return Collections.singletonList( InternalMirrorResource.class ); } diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java index 830ddb32eb190..99706d4eaeaac 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java @@ -34,8 +34,8 @@ public class FakeLocalMetadataStore { private static final Logger log = LoggerFactory.getLogger(FakeLocalMetadataStore.class); - private static ConcurrentHashMap> allTopics = new ConcurrentHashMap<>(); - private static ConcurrentHashMap> allAcls = new ConcurrentHashMap<>(); + private static final ConcurrentHashMap> ALL_TOPICS = new ConcurrentHashMap<>(); + private static final ConcurrentHashMap> ALL_ACLS = new ConcurrentHashMap<>(); /** * Add topic to allTopics. @@ -44,7 +44,7 @@ public class FakeLocalMetadataStore { public static void addTopicToLocalMetadataStore(NewTopic newTopic) { ConcurrentHashMap configs = new ConcurrentHashMap<>(newTopic.configs()); configs.putIfAbsent("partitions", String.valueOf(newTopic.numPartitions())); - allTopics.putIfAbsent(newTopic.name(), configs); + ALL_TOPICS.putIfAbsent(newTopic.name(), configs); } /** @@ -53,9 +53,9 @@ public static void addTopicToLocalMetadataStore(NewTopic newTopic) { * @param newPartitionCount new partition count. 
*/ public static void updatePartitionCount(String topic, int newPartitionCount) { - ConcurrentHashMap configs = FakeLocalMetadataStore.allTopics.getOrDefault(topic, new ConcurrentHashMap<>()); + ConcurrentHashMap configs = FakeLocalMetadataStore.ALL_TOPICS.getOrDefault(topic, new ConcurrentHashMap<>()); configs.compute("partitions", (key, value) -> String.valueOf(newPartitionCount)); - FakeLocalMetadataStore.allTopics.putIfAbsent(topic, configs); + FakeLocalMetadataStore.ALL_TOPICS.putIfAbsent(topic, configs); } /** @@ -64,7 +64,7 @@ public static void updatePartitionCount(String topic, int newPartitionCount) { * @param newConfig topic config */ public static void updateTopicConfig(String topic, Config newConfig) { - ConcurrentHashMap topicConfigs = FakeLocalMetadataStore.allTopics.getOrDefault(topic, new ConcurrentHashMap<>()); + ConcurrentHashMap topicConfigs = FakeLocalMetadataStore.ALL_TOPICS.getOrDefault(topic, new ConcurrentHashMap<>()); newConfig.entries().stream().forEach(configEntry -> { if (configEntry.name() != null) { if (configEntry.value() != null) { @@ -75,7 +75,7 @@ public static void updateTopicConfig(String topic, Config newConfig) { } } }); - FakeLocalMetadataStore.allTopics.putIfAbsent(topic, topicConfigs); + FakeLocalMetadataStore.ALL_TOPICS.putIfAbsent(topic, topicConfigs); } /** @@ -84,7 +84,7 @@ public static void updateTopicConfig(String topic, Config newConfig) { * @return true if topic name is a key in allTopics */ public static Boolean containsTopic(String topic) { - return allTopics.containsKey(topic); + return ALL_TOPICS.containsKey(topic); } /** @@ -93,7 +93,7 @@ public static Boolean containsTopic(String topic) { * @return topic configurations. */ public static Map topicConfig(String topic) { - return allTopics.getOrDefault(topic, new ConcurrentHashMap<>()); + return ALL_TOPICS.getOrDefault(topic, new ConcurrentHashMap<>()); } /** @@ -102,7 +102,7 @@ public static Map topicConfig(String topic) { * @return {@link List} */ public static List aclBindings(String aclPrinciple) { - return FakeLocalMetadataStore.allAcls.getOrDefault("User:" + aclPrinciple, new Vector<>()); + return FakeLocalMetadataStore.ALL_ACLS.getOrDefault("User:" + aclPrinciple, new Vector<>()); } /** @@ -111,16 +111,16 @@ public static List aclBindings(String aclPrinciple) { * @param aclBinding {@link AclBinding} */ public static void addACLs(String principal, AclBinding aclBinding) { - Vector aclBindings = FakeLocalMetadataStore.allAcls.getOrDefault(principal, new Vector<>()); + Vector aclBindings = FakeLocalMetadataStore.ALL_ACLS.getOrDefault(principal, new Vector<>()); aclBindings.add(aclBinding); - FakeLocalMetadataStore.allAcls.putIfAbsent(principal, aclBindings); + FakeLocalMetadataStore.ALL_ACLS.putIfAbsent(principal, aclBindings); } /** * clear allTopics and allAcls. 
*/ public static void clear() { - allTopics.clear(); - allAcls.clear(); + ALL_TOPICS.clear(); + ALL_ACLS.clear(); } } diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java index b54aa7073ce88..70f1cd6f6a343 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java @@ -164,7 +164,7 @@ public void startClusters() throws Exception { startClusters(additionalConfig); try (Admin adminClient = primary.kafka().createAdminClient()) { - adminClient.createAcls(Arrays.asList( + adminClient.createAcls(Collections.singletonList( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -172,7 +172,7 @@ public void startClusters() throws Exception { )).all().get(); } try (Admin adminClient = backup.kafka().createAdminClient()) { - adminClient.createAcls(Arrays.asList( + adminClient.createAcls(Collections.singletonList( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -293,7 +293,7 @@ public void testSyncTopicConfigUseProvidedForwardingAdmin() throws Exception { public void testSyncTopicACLsUseProvidedForwardingAdmin() throws Exception { mm2Props.put("sync.topic.acls.enabled", "true"); mm2Config = new MirrorMakerConfig(mm2Props); - List aclBindings = Arrays.asList( + List aclBindings = Collections.singletonList( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "test-topic-1", PatternType.LITERAL), new AccessControlEntry("User:dummy", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java index 93ea9499df252..c262fd076a4c9 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java @@ -324,7 +324,7 @@ public > List> transformationS @SuppressWarnings("unchecked") Predicate predicate = Utils.newInstance(getClass(predicatePrefix + "type"), Predicate.class); predicate.configure(originalsWithPrefix(predicatePrefix)); - transformations.add(new TransformationStage<>(predicate, negate == null ? 
false : Boolean.parseBoolean(negate.toString()), transformation)); + transformations.add(new TransformationStage<>(predicate, negate != null && Boolean.parseBoolean(negate.toString()), transformation)); } else { transformations.add(new TransformationStage<>(transformation)); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java index 3adbc0f14ec36..fa25a5ea60d72 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java @@ -27,6 +27,7 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Map; public class ConnectRestServer extends RestServer { @@ -56,7 +57,7 @@ protected Collection> regularResources() { @Override protected Collection> adminResources() { - return Arrays.asList( + return Collections.singletonList( LoggingResource.class ); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java index d0f67386de35c..dd075b5f90df8 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java @@ -85,17 +85,15 @@ public int hashCode() { @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("[") - .append(name) - .append(",") - .append(errorCount) - .append(",") - .append(groups) - .append(",") - .append(configs) - .append("]"); - return sb.toString(); + return "[" + + name + + "," + + errorCount + + "," + + groups + + "," + + configs + + "]"; } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java index 5cfdf2dd4f00b..0b1a41c212ed9 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java @@ -141,30 +141,28 @@ public int hashCode() { @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("[") - .append(name) - .append(",") - .append(type) - .append(",") - .append(required) - .append(",") - .append(defaultValue) - .append(",") - .append(importance) - .append(",") - .append(documentation) - .append(",") - .append(group) - .append(",") - .append(orderInGroup) - .append(",") - .append(width) - .append(",") - .append(displayName) - .append(",") - .append(dependents) - .append("]"); - return sb.toString(); + return "[" + + name + + "," + + type + + "," + + required + + "," + + defaultValue + + "," + + importance + + "," + + documentation + + "," + + group + + "," + + orderInGroup + + "," + + width + + "," + + displayName + + "," + + dependents + + "]"; } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java index 61be2769f3b24..eb055ab13fb11 100644 --- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java @@ -199,8 +199,8 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(SINK_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Arrays.asList(FOO_TOPIC), - "Active topic set is not: " + Arrays.asList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Collections.singletonList(FOO_TOPIC), + "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); // deleting a connector resets its active topics connect.deleteConnector(FOO_CONNECTOR); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TaskHandle.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TaskHandle.java index ab5b711af0f53..fe63658a7578a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TaskHandle.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TaskHandle.java @@ -48,7 +48,7 @@ public class TaskHandle { private CountDownLatch recordsRemainingLatch; private CountDownLatch recordsToCommitLatch; private int expectedRecords = -1; - private int expectedCommits = -1; + private final int expectedCommits = -1; public TaskHandle(ConnectorHandle connectorHandle, String taskId, Consumer consumer) { this.taskId = taskId; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java index a0b8cecc8f427..3aeecc1d757c3 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java @@ -134,8 +134,8 @@ public class ErrorHandlingTaskTest { private static final TaskConfig TASK_CONFIG = new TaskConfig(TASK_PROPS); - private ConnectorTaskId taskId = new ConnectorTaskId("job", 0); - private TargetState initialState = TargetState.STARTED; + private final ConnectorTaskId taskId = new ConnectorTaskId("job", 0); + private final TargetState initialState = TargetState.STARTED; private Time time; private MockConnectMetrics metrics; @SuppressWarnings("unused") @@ -179,7 +179,7 @@ public class ErrorHandlingTaskTest { private ErrorHandlingMetrics errorHandlingMetrics; - private boolean enableTopicCreation; + private final boolean enableTopicCreation; @Parameterized.Parameters public static Collection parameters() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java index 597041715b538..c5f9f8314d9ef 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java @@ -150,7 +150,7 @@ public static String currentMetricValueAsString(ConnectMetrics metrics, MetricGr } public static class MockMetricsReporter implements MetricsReporter { - private Map metricsByName = new HashMap<>(); + private final Map metricsByName = new HashMap<>(); private 
MetricsContext metricsContext; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java index 21b2b10c16e75..75f942a6871d8 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java @@ -141,14 +141,14 @@ public class WorkerSinkTaskTest { private static final TaskConfig TASK_CONFIG = new TaskConfig(TASK_PROPS); - private ConnectorTaskId taskId = new ConnectorTaskId("job", 0); - private ConnectorTaskId taskId1 = new ConnectorTaskId("job", 1); - private TargetState initialState = TargetState.STARTED; + private final ConnectorTaskId taskId = new ConnectorTaskId("job", 0); + private final ConnectorTaskId taskId1 = new ConnectorTaskId("job", 1); + private final TargetState initialState = TargetState.STARTED; private MockTime time; private WorkerSinkTask workerTask; @Mock private SinkTask sinkTask; - private ArgumentCaptor sinkTaskContext = ArgumentCaptor.forClass(WorkerSinkTaskContext.class); + private final ArgumentCaptor sinkTaskContext = ArgumentCaptor.forClass(WorkerSinkTaskContext.class); private WorkerConfig workerConfig; private MockConnectMetrics metrics; @Mock @@ -169,7 +169,7 @@ public class WorkerSinkTaskTest { private KafkaConsumer consumer; @Mock private ErrorHandlingMetrics errorHandlingMetrics; - private ArgumentCaptor rebalanceListener = ArgumentCaptor.forClass(ConsumerRebalanceListener.class); + private final ArgumentCaptor rebalanceListener = ArgumentCaptor.forClass(ConsumerRebalanceListener.class); @Rule public final MockitoRule rule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS); @@ -684,9 +684,9 @@ public void testPreCommitFailureAfterPartialRevocationAndAssignment() { when(consumer.assignment()) .thenReturn(INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2))) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2))) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2))) + .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) + .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) + .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))); @@ -1788,7 +1788,7 @@ private void expectRebalanceAssignmentError(RuntimeException e) { } private void verifyInitializeTask() { - verify(consumer).subscribe(eq(asList(TOPIC)), rebalanceListener.capture()); + verify(consumer).subscribe(eq(Collections.singletonList(TOPIC)), rebalanceListener.capture()); verify(sinkTask).initialize(sinkTaskContext.capture()); verify(sinkTask).start(TASK_PROPS); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java index 4579794a2c4ff..b51b84d1ac623 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java @@ -2602,7 +2602,7 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti Map taskConfig = new HashMap<>(); // No warnings or 
-        when(sourceConnector.taskConfigs(1)).thenReturn(Arrays.asList());
+        when(sourceConnector.taskConfigs(1)).thenReturn(Collections.emptyList());
         try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) {
             connectorProps.put(TASKS_MAX_CONFIG, "1");
             List> taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps));
@@ -2611,7 +2611,7 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti
         }
 
         // No warnings or exceptions when a connector generates the maximum permitted number of task configs
-        when(sourceConnector.taskConfigs(1)).thenReturn(Arrays.asList(taskConfig));
+        when(sourceConnector.taskConfigs(1)).thenReturn(Collections.singletonList(taskConfig));
         when(sourceConnector.taskConfigs(2)).thenReturn(Arrays.asList(taskConfig, taskConfig));
         when(sourceConnector.taskConfigs(3)).thenReturn(Arrays.asList(taskConfig, taskConfig, taskConfig));
         try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) {
@@ -2672,7 +2672,7 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti
         }
 
         // One last sanity check in case the connector is reconfigured and respects tasks.max
-        when(sourceConnector.taskConfigs(1)).thenReturn(Arrays.asList(taskConfig));
+        when(sourceConnector.taskConfigs(1)).thenReturn(Collections.singletonList(taskConfig));
         try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) {
             connectorProps.put(TASKS_MAX_CONFIG, "1");
             List> taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps));
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java
index 6dcbe6c38e617..38084383e2951 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java
@@ -98,7 +98,7 @@ public void testEagerToCoopMetadata() {
     public void testEagerToEagerAssignment() {
         ConnectProtocol.Assignment assignment = new ConnectProtocol.Assignment(
                 ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L,
-                Arrays.asList(connectorId1, connectorId3), Arrays.asList(taskId2x0));
+                Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0));
 
         ByteBuffer leaderBuf = ConnectProtocol.serializeAssignment(assignment);
         ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(leaderBuf);
@@ -110,7 +110,7 @@ public void testEagerToEagerAssignment() {
 
         ConnectProtocol.Assignment assignment2 = new ConnectProtocol.Assignment(
                 ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L,
-                Arrays.asList(connectorId2), Arrays.asList(taskId1x0, taskId3x0));
+                Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0));
 
         ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2);
         ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(memberBuf);
@@ -125,7 +125,7 @@ public void testEagerToEagerAssignment() {
     public void testCoopToCoopAssignment() {
         ExtendedAssignment assignment = new ExtendedAssignment(
                 CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L,
-                Arrays.asList(connectorId1, connectorId3), Arrays.asList(taskId2x0),
+                Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0),
                 Collections.emptyList(), Collections.emptyList(), 0);
 
         ByteBuffer leaderBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false);
@@ -138,7 +138,7 @@ public void testCoopToCoopAssignment() {
 
         ExtendedAssignment assignment2 = new ExtendedAssignment(
                 CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L,
-                Arrays.asList(connectorId2), Arrays.asList(taskId1x0, taskId3x0),
+                Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0),
                 Collections.emptyList(), Collections.emptyList(), 0);
 
         ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2);
@@ -155,7 +155,7 @@ public void testCoopToCoopAssignment() {
     public void testEagerToCoopAssignment() {
         ConnectProtocol.Assignment assignment = new ConnectProtocol.Assignment(
                 ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L,
-                Arrays.asList(connectorId1, connectorId3), Arrays.asList(taskId2x0));
+                Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0));
 
         ByteBuffer leaderBuf = ConnectProtocol.serializeAssignment(assignment);
         ConnectProtocol.Assignment leaderAssignment =
@@ -168,7 +168,7 @@ public void testEagerToCoopAssignment() {
 
         ConnectProtocol.Assignment assignment2 = new ConnectProtocol.Assignment(
                 ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L,
-                Arrays.asList(connectorId2), Arrays.asList(taskId1x0, taskId3x0));
+                Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0));
 
         ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2);
         ConnectProtocol.Assignment memberAssignment =
@@ -184,7 +184,7 @@ public void testEagerToCoopAssignment() {
     public void testCoopToEagerAssignment() {
         ExtendedAssignment assignment = new ExtendedAssignment(
                 CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L,
-                Arrays.asList(connectorId1, connectorId3), Arrays.asList(taskId2x0),
+                Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0),
                 Collections.emptyList(), Collections.emptyList(), 0);
 
         ByteBuffer leaderBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false);
@@ -197,7 +197,7 @@ public void testCoopToEagerAssignment() {
 
         ExtendedAssignment assignment2 = new ExtendedAssignment(
                 CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L,
-                Arrays.asList(connectorId2), Arrays.asList(taskId1x0, taskId3x0),
+                Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0),
                 Collections.emptyList(), Collections.emptyList(), 0);
 
         ByteBuffer memberBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment2, false);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
index f4a7cd247bd2b..f69f586bc90cf 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
@@ -331,7 +331,7 @@ public void testJoinAssignment() throws Exception {
         // Join group and get assignment
         when(member.memberId()).thenReturn("member");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
-        expectRebalance(1, Arrays.asList(CONN1), Arrays.asList(TASK1));
+        expectRebalance(1, singletonList(CONN1), singletonList(TASK1));
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
@@ -355,7 +355,7 @@ public void testRebalance() throws Exception {
         // Join group and get assignment
         when(member.memberId()).thenReturn("member");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
-        expectRebalance(1, Arrays.asList(CONN1), Arrays.asList(TASK1));
+        expectRebalance(1, singletonList(CONN1), singletonList(TASK1));
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
@@ -380,8 +380,8 @@ public void testRebalance() throws Exception {
         verify(worker).startSourceTask(eq(TASK1), any(), any(), any(), eq(herder), eq(TargetState.STARTED));
 
         // Rebalance and get a new assignment
-        expectRebalance(Arrays.asList(CONN1), Arrays.asList(TASK1), ConnectProtocol.Assignment.NO_ERROR,
-                1, Arrays.asList(CONN1), Arrays.asList());
+        expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR,
+                1, singletonList(CONN1), Collections.emptyList());
 
         herder.tick();
         time.sleep(3000L);
         assertStatistics(3, 2, 100, 3000);
@@ -414,7 +414,7 @@ public void testIncrementalCooperativeRebalanceForNewMember() throws Exception {
         // The new member got its assignment
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR,
-                1, Arrays.asList(CONN1), Arrays.asList(TASK1), 0);
+                1, singletonList(CONN1), singletonList(TASK1), 0);
 
         // and the new assignment started
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
@@ -445,7 +445,7 @@ public void testIncrementalCooperativeRebalanceForExistingMember() {
         // Join group. First rebalance contains revocations because a new member joined.
         when(member.memberId()).thenReturn("member");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1);
-        expectRebalance(Arrays.asList(CONN1), Arrays.asList(TASK1),
+        expectRebalance(singletonList(CONN1), singletonList(TASK1),
                 ConnectProtocol.Assignment.NO_ERROR, 1,
                 Collections.emptyList(), Collections.emptyList(), 0);
         doNothing().when(member).requestRejoin();
@@ -482,7 +482,7 @@ public void testIncrementalCooperativeRebalanceWithDelay() throws Exception {
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1);
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR, 1,
-                Collections.emptyList(), Arrays.asList(TASK2),
+                Collections.emptyList(), singletonList(TASK2),
                 rebalanceDelay);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
@@ -503,7 +503,7 @@ public void testIncrementalCooperativeRebalanceWithDelay() throws Exception {
         // The member got its assignment and revocation
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR,
-                1, Arrays.asList(CONN1), Arrays.asList(TASK1), 0);
+                1, singletonList(CONN1), singletonList(TASK1), 0);
 
         // and the new assignment started
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
@@ -528,7 +528,7 @@ public void testRebalanceFailedConnector() throws Exception {
         // Join group and get assignment
         when(member.memberId()).thenReturn("member");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
-        expectRebalance(1, Arrays.asList(CONN1), Arrays.asList(TASK1));
+        expectRebalance(1, singletonList(CONN1), singletonList(TASK1));
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
@@ -550,8 +550,8 @@ public void testRebalanceFailedConnector() throws Exception {
         verify(worker).startSourceTask(eq(TASK1), any(), any(), any(), eq(herder), eq(TargetState.STARTED));
 
         // Rebalance and get a new assignment
-        expectRebalance(Arrays.asList(CONN1), Arrays.asList(TASK1), ConnectProtocol.Assignment.NO_ERROR,
-                1, Arrays.asList(CONN1), Arrays.asList());
+        expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR,
+                1, singletonList(CONN1), Collections.emptyList());
 
         // worker is not running, so we should see no call to connectorTaskConfigs()
         expectExecuteTaskReconfiguration(false, null, null);
@@ -606,7 +606,7 @@ public void revokeAndReassign(boolean incompleteRebalance) throws TimeoutExcepti
         // Perform a partial re-balance just prior to the revocation
         // bump the configOffset to trigger reading the config topic to the end
         configOffset++;
-        expectRebalance(configOffset, Arrays.asList(), Arrays.asList());
+        expectRebalance(configOffset, Collections.emptyList(), Collections.emptyList());
         // give it the wrong snapshot, as if we're out of sync/can't reach the broker
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         doNothing().when(member).requestRejoin();
@@ -616,9 +616,9 @@ public void revokeAndReassign(boolean incompleteRebalance) throws TimeoutExcepti
         }
 
         // Revoke the connector in the next rebalance
-        expectRebalance(Arrays.asList(CONN1), Arrays.asList(),
-                ConnectProtocol.Assignment.NO_ERROR, configOffset, Arrays.asList(),
-                Arrays.asList());
+        expectRebalance(singletonList(CONN1), Collections.emptyList(),
+                ConnectProtocol.Assignment.NO_ERROR, configOffset, Collections.emptyList(),
+                Collections.emptyList());
 
         if (incompleteRebalance) {
             // Same as SNAPSHOT, except with an updated offset
@@ -643,7 +643,7 @@ public void revokeAndReassign(boolean incompleteRebalance) throws TimeoutExcepti
         herder.tick();
 
         // re-assign the connector back to the same worker to ensure state was cleaned up
-        expectRebalance(configOffset, Arrays.asList(CONN1), Arrays.asList());
+        expectRebalance(configOffset, singletonList(CONN1), Collections.emptyList());
 
         herder.tick();
 
@@ -973,7 +973,7 @@ public void testDestroyConnector() throws Exception {
         when(member.memberId()).thenReturn("leader");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
         // Start with one connector
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList(), true);
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
@@ -1006,7 +1006,7 @@ public void testDestroyConnector() throws Exception {
         doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(FOO_TOPIC));
         doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(BAR_TOPIC));
-        expectRebalance(Arrays.asList(CONN1), Arrays.asList(TASK1),
+        expectRebalance(singletonList(CONN1), singletonList(TASK1),
                 ConnectProtocol.Assignment.NO_ERROR, 2,
                 "leader", "leaderUrl", Collections.emptyList(), Collections.emptyList(), 0, true);
         expectConfigRefreshAndSnapshot(ClusterConfigState.EMPTY);
@@ -1533,7 +1533,7 @@ public void testConnectorConfigAdded() throws Exception {
         // Performs rebalance and gets new assignment
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
-                ConnectProtocol.Assignment.NO_ERROR, 1, Arrays.asList(CONN1), Collections.emptyList());
+                ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList());
 
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
         doAnswer(invocation -> {
@@ -1556,7 +1556,7 @@ public void testConnectorConfigUpdate() throws Exception {
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
 
         // join
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList());
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList());
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         expectMemberPoll();
 
@@ -1591,7 +1591,7 @@ public void testConnectorConfigUpdateFailedTransformation() throws Exception {
         WorkerConfigTransformer configTransformer = mock(WorkerConfigTransformer.class);
 
         // join
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList());
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList());
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         expectMemberPoll();
 
@@ -1646,7 +1646,7 @@ public void testConnectorPaused() throws Exception {
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
 
         // join
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList());
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList());
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         expectMemberPoll();
 
@@ -1683,7 +1683,7 @@ public void testConnectorResumed() throws Exception {
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
 
         // start with the connector paused
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList());
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList());
         expectConfigRefreshAndSnapshot(SNAPSHOT_PAUSED_CONN1);
         expectMemberPoll();
 
@@ -1723,7 +1723,7 @@ public void testConnectorStopped() throws Exception {
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
 
         // join
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList());
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList());
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         expectMemberPoll();
 
@@ -1976,7 +1976,7 @@ public void testTaskConfigAdded() {
         // Performs rebalance and gets new assignment
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR, 1, Collections.emptyList(),
-                Arrays.asList(TASK0));
+                singletonList(TASK0));
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true);
@@ -2014,7 +2014,7 @@ public void testJoinLeaderCatchUpFails() throws Exception {
         before = time.milliseconds();
 
         // After backoff, restart the process and this time succeed
-        expectRebalance(1, Arrays.asList(CONN1), Arrays.asList(TASK1), true);
+        expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class);
@@ -2051,7 +2051,7 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep
         when(member.memberId()).thenReturn("leader");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1);
         when(statusBackingStore.connectors()).thenReturn(Collections.emptySet());
-        expectRebalance(1, Arrays.asList(CONN1), Arrays.asList(TASK1), true);
+        expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         expectMemberPoll();
 
@@ -2072,7 +2072,7 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep
         // The leader gets the same assignment after a rebalance is triggered
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR,
-                1, "leader", "leaderUrl", Arrays.asList(CONN1), Arrays.asList(TASK1), 0, true);
+                1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true);
 
         time.sleep(2000L);
         assertStatistics(3, 1, 100, 2000);
@@ -2106,7 +2106,7 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep
         // After a few retries succeed to read the log to the end
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR,
-                1, "leader", "leaderUrl", Arrays.asList(CONN1), Arrays.asList(TASK1), 0, true);
+                1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         before = time.milliseconds();
@@ -2125,7 +2125,7 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti
         when(member.memberId()).thenReturn("leader");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1);
         when(statusBackingStore.connectors()).thenReturn(Collections.emptySet());
-        expectRebalance(1, Arrays.asList(CONN1), Arrays.asList(TASK1), true);
+        expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         expectMemberPoll();
 
@@ -2146,7 +2146,7 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti
         // The leader gets the same assignment after a rebalance is triggered
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR, 1,
-                "leader", "leaderUrl", Arrays.asList(CONN1), Arrays.asList(TASK1), 0, true);
+                "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true);
 
         time.sleep(2000L);
         assertStatistics(3, 1, 100, 2000);
@@ -2190,7 +2190,7 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti
         // The worker gets back the assignment that had given up
         expectRebalance(Collections.emptyList(), Collections.emptyList(),
                 ConnectProtocol.Assignment.NO_ERROR,
-                1, "leader", "leaderUrl", Arrays.asList(CONN1), Arrays.asList(TASK1),
+                1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1),
                 0, true);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
@@ -2267,7 +2267,7 @@ public void testAccessors() throws Exception {
     @Test
     public void testPutConnectorConfig() throws Exception {
         when(member.memberId()).thenReturn("leader");
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList(), true);
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true);
         when(statusBackingStore.connectors()).thenReturn(Collections.emptySet());
         expectConfigRefreshAndSnapshot(SNAPSHOT);
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
@@ -2388,7 +2388,7 @@ public void testPatchConnectorConfigNotALeader() {
         // Patch the connector config.
         expectMemberEnsureActive();
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList(), false);
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList(), false);
 
         FutureCallback> patchCallback = new FutureCallback<>();
         herder.patchConnectorConfig(CONN1, new HashMap<>(), patchCallback);
@@ -2401,7 +2401,7 @@ public void testPatchConnectorConfigNotALeader() {
     @Test
     public void testPatchConnectorConfig() throws Exception {
         when(member.memberId()).thenReturn("leader");
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList(), true);
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true);
         when(statusBackingStore.connectors()).thenReturn(Collections.emptySet());
 
         Map originalConnConfig = new HashMap<>(CONN1_CONFIG);
@@ -2440,7 +2440,7 @@ public void testPatchConnectorConfig() throws Exception {
         patchedConnConfig.put("foo3", "added");
 
         expectMemberEnsureActive();
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList(), true);
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true);
 
         ArgumentCaptor> validateCallback = ArgumentCaptor.forClass(Callback.class);
         doAnswer(invocation -> {
@@ -2567,7 +2567,7 @@ public void testPutTaskConfigsSignatureNotRequiredV0() {
         verify(member).wakeup();
         verifyNoMoreInteractions(member, taskConfigCb);
         assertEquals(
-                Arrays.asList("awaiting startup"),
+                singletonList("awaiting startup"),
                 stages
         );
     }
@@ -2584,7 +2584,7 @@ public void testPutTaskConfigsSignatureNotRequiredV1() {
         verify(member).wakeup();
         verifyNoMoreInteractions(member, taskConfigCb);
         assertEquals(
-                Arrays.asList("awaiting startup"),
+                singletonList("awaiting startup"),
                 stages
         );
     }
@@ -2690,7 +2690,7 @@ public void testPutTaskConfigsValidRequiredSignature() {
         verifyNoMoreInteractions(member, taskConfigCb);
 
         assertEquals(
-                Arrays.asList("awaiting startup"),
+                singletonList("awaiting startup"),
                 stages
         );
     }
@@ -3369,7 +3369,7 @@ public void testPollDurationOnSlowConnectorOperations() {
     public void shouldThrowWhenStartAndStopExecutorThrowsRejectedExecutionExceptionAndHerderNotStopping() {
         when(member.memberId()).thenReturn("leader");
         when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
-        expectRebalance(1, Arrays.asList(CONN1), Collections.emptyList(), true);
+        expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true);
         expectConfigRefreshAndSnapshot(SNAPSHOT);
 
         herder.startAndStopExecutor.shutdown();
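A related note on the Arrays.asList() -> Collections.emptyList() replacements in the herder tests above: both yield an empty list, but Collections.emptyList() hands back one shared immutable constant instead of allocating a fresh wrapper on every call. A standalone sketch (illustrative only, not part of the patch):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class EmptyListDemo {
        public static void main(String[] args) {
            List<String> a = Collections.emptyList();
            List<String> b = Collections.emptyList();
            // emptyList() returns the same immutable singleton every time
            System.out.println(a == b);                              // true
            // Arrays.asList() builds a new (empty, fixed-size) list per call
            System.out.println(Arrays.asList() == Arrays.asList());  // false
        }
    }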
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java
index 3edb4d52dc0ba..319bdc9f9f8ef 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java
@@ -659,7 +659,7 @@ public void testAssignConnectorsWhenBalanced() {
                 .collect(Collectors.toList());
         expectedAssignment.get(0).connectors().addAll(Arrays.asList("connector6", "connector9"));
         expectedAssignment.get(1).connectors().addAll(Arrays.asList("connector7", "connector10"));
-        expectedAssignment.get(2).connectors().addAll(Arrays.asList("connector8"));
+        expectedAssignment.get(2).connectors().add("connector8");
 
         List newConnectors = newConnectors(6, 11);
         assignor.assignConnectors(existingAssignment, newConnectors);
@@ -679,11 +679,11 @@ public void testAssignTasksWhenBalanced() {
 
         expectedAssignment.get(0).connectors().addAll(Arrays.asList("connector6", "connector9"));
         expectedAssignment.get(1).connectors().addAll(Arrays.asList("connector7", "connector10"));
-        expectedAssignment.get(2).connectors().addAll(Arrays.asList("connector8"));
+        expectedAssignment.get(2).connectors().add("connector8");
 
         expectedAssignment.get(0).tasks().addAll(Arrays.asList(new ConnectorTaskId("task", 6), new ConnectorTaskId("task", 9)));
         expectedAssignment.get(1).tasks().addAll(Arrays.asList(new ConnectorTaskId("task", 7), new ConnectorTaskId("task", 10)));
-        expectedAssignment.get(2).tasks().addAll(Arrays.asList(new ConnectorTaskId("task", 8)));
+        expectedAssignment.get(2).tasks().add(new ConnectorTaskId("task", 8));
 
         List newConnectors = newConnectors(6, 11);
         assignor.assignConnectors(existingAssignment, newConnectors);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java
index 8b28c37aca92a..ca5c3bdc6f833 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java
@@ -75,18 +75,18 @@ public class WorkerCoordinatorIncrementalTest {
     @Rule
     public MockitoRule rule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS);
 
-    private String connectorId1 = "connector1";
-    private String connectorId2 = "connector2";
-    private ConnectorTaskId taskId1x0 = new ConnectorTaskId(connectorId1, 0);
-    private ConnectorTaskId taskId2x0 = new ConnectorTaskId(connectorId2, 0);
-
-    private String groupId = "test-group";
-    private int sessionTimeoutMs = 10;
-    private int rebalanceTimeoutMs = 60;
-    private int heartbeatIntervalMs = 2;
-    private long retryBackoffMs = 100;
-    private long retryBackoffMaxMs = 1000;
-    private int requestTimeoutMs = 1000;
+    private final String connectorId1 = "connector1";
+    private final String connectorId2 = "connector2";
+    private final ConnectorTaskId taskId1x0 = new ConnectorTaskId(connectorId1, 0);
+    private final ConnectorTaskId taskId2x0 = new ConnectorTaskId(connectorId2, 0);
+
+    private final String groupId = "test-group";
+    private final int sessionTimeoutMs = 10;
+    private final int rebalanceTimeoutMs = 60;
+    private final int heartbeatIntervalMs = 2;
+    private final long retryBackoffMs = 100;
+    private final long retryBackoffMaxMs = 1000;
+    private final int requestTimeoutMs = 1000;
     private MockTime time;
     private MockClient client;
     private Node node;
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java
index 72a51afd500be..0494d00272668 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java
@@ -72,7 +72,7 @@ public class ConnectRestServerTest {
     @Mock private Plugins plugins;
     private ConnectRestServer server;
     private CloseableHttpClient httpClient;
-    private Collection responses = new ArrayList<>();
+    private final Collection responses = new ArrayList<>();
 
     protected static final String KAFKA_CLUSTER_ID = "Xbafgnagvar";
 
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java
index 28dd725afd455..4930c1a3ef221 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java
@@ -21,6 +21,7 @@ import org.junit.Test;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -68,7 +69,7 @@ public void testListenersConfigAllowedValues() {
         props.put(RestServerConfig.LISTENERS_CONFIG, "http://a.b:9999");
         config = RestServerConfig.forPublic(null, props);
-        assertEquals(Arrays.asList("http://a.b:9999"), config.listeners());
+        assertEquals(Collections.singletonList("http://a.b:9999"), config.listeners());
 
         props.put(RestServerConfig.LISTENERS_CONFIG, "http://a.b:9999, https://a.b:7812");
         config = RestServerConfig.forPublic(null, props);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfoTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfoTest.java
index 34e28ee47f6bb..97649ca599ee0 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfoTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfoTest.java
@@ -19,8 +19,8 @@ import org.apache.kafka.connect.runtime.isolation.PluginDesc;
 import org.junit.Test;
 
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class PluginInfoTest {
 
@@ -29,9 +29,9 @@ public void testNoVersionFilter() {
         PluginInfo.NoVersionFilter filter = new PluginInfo.NoVersionFilter();
         // We intentionally refrain from using assertEquals and assertNotEquals
         // here to ensure that the filter's equals() method is used
-        assertFalse(filter.equals("1.0"));
-        assertFalse(filter.equals(new Object()));
-        assertFalse(filter.equals(null));
-        assertTrue(filter.equals(PluginDesc.UNDEFINED_VERSION));
+        assertNotEquals("1.0", filter);
+        assertNotEquals(filter, new Object());
+        assertNotEquals(null, filter);
+        assertEquals(PluginDesc.UNDEFINED_VERSION, filter);
     }
 }
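A subtlety worth knowing about the PluginInfoTest hunk above: JUnit's assertEquals/assertNotEquals decide which operand's equals() runs, so argument order matters when the whole point of a test is to exercise a particular equals() implementation. A rough standalone sketch of the mechanics (the Filter class here is a hypothetical stand-in, not Connect's NoVersionFilter):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertNotEquals;

    public class EqualsDirectionDemo {
        // Hypothetical class whose equals() we want the assertions to exercise
        static final class Filter {
            @Override
            public boolean equals(Object o) {
                return "undefined".equals(o) || o instanceof Filter;
            }
            @Override
            public int hashCode() {
                return 0;
            }
        }

        public static void main(String[] args) {
            Filter filter = new Filter();
            // assertEquals(expected, actual) invokes expected.equals(actual) when
            // expected is non-null, so putting the filter first keeps Filter.equals in play:
            assertEquals(filter, "undefined");   // passes via Filter.equals
            // assertNotEquals(null, filter) only checks that filter is non-null;
            // it never actually calls filter.equals(null).
            assertNotEquals(null, filter);
        }
    }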
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java
index 07d48a6e01f8a..4b3f6e673238b 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java
@@ -60,13 +60,13 @@ public class FileOffsetBackingStoreTest {
 
     private Converter converter;
 
-    private static Map firstSet = new HashMap<>();
+    private static final Map FIRST_SET = new HashMap<>();
     private static final Runnable EMPTY_RUNNABLE = () -> {
     };
 
     static {
-        firstSet.put(buffer("key"), buffer("value"));
-        firstSet.put(null, null);
+        FIRST_SET.put(buffer("key"), buffer("value"));
+        FIRST_SET.put(null, null);
     }
 
     @Before
@@ -96,7 +96,7 @@ public void testGetSet() throws Exception {
         @SuppressWarnings("unchecked")
         Callback setCallback = mock(Callback.class);
 
-        store.set(firstSet, setCallback).get();
+        store.set(FIRST_SET, setCallback).get();
 
         Map values = store.get(Arrays.asList(buffer("key"), buffer("bad"))).get();
         assertEquals(buffer("value"), values.get(buffer("key")));
@@ -109,7 +109,7 @@ public void testSaveRestore() throws Exception {
         @SuppressWarnings("unchecked")
         Callback setCallback = mock(Callback.class);
 
-        store.set(firstSet, setCallback).get();
+        store.set(FIRST_SET, setCallback).get();
         store.stop();
 
         // Restore into a new store to ensure correct reload from scratch
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java
index 6c9057a35177c..6ebac341032a3 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java
@@ -800,7 +800,7 @@ public void testRestoreZeroTasks() {
         // Should see a single connector and its config should be the last one seen anywhere in the log
         ClusterConfigState configState = configStorage.snapshot();
         assertEquals(8, configState.offset()); // Should always be next to be read, even if uncommitted
-        assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
         // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2]
         assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0)));
         // Should see 0 tasks for that connector.
@@ -1053,7 +1053,7 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() throws Exceptio
         // After reading the log, it should have been in an inconsistent state
         ClusterConfigState configState = configStorage.snapshot();
         assertEquals(6, configState.offset()); // Should always be next to be read, not last committed
-        assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
         // Inconsistent data should leave us with no tasks listed for the connector and an entry in the inconsistent list
         assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0)));
         // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0]
@@ -1086,8 +1086,8 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() throws Exceptio
         // This is only two more ahead of the last one because multiple calls fail, and so their configs are not written
         // to the topic. Only the last call with 1 task config + 1 commit actually gets written.
         assertEquals(8, configState.offset());
-        assertEquals(Arrays.asList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
-        assertEquals(Arrays.asList(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0)));
+        assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.singletonList(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0)));
         assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0)));
         assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
 
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
index fc2caf75d9b18..ae5f82cd3eeb2 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java
@@ -151,7 +151,7 @@ public class KafkaConfigBackingStoreTest {
     private Converter converter;
     @Mock
     private ConfigBackingStore.UpdateListener configUpdateListener;
-    private Map props = new HashMap<>(DEFAULT_CONFIG_STORAGE_PROPS);
+    private final Map props = new HashMap<>(DEFAULT_CONFIG_STORAGE_PROPS);
     private DistributedConfig config;
     @Mock
     KafkaBasedLog storeLog;
@@ -328,7 +328,7 @@ public void testPutTaskConfigs() throws Exception {
         configState = configStorage.snapshot();
         assertEquals(3, configState.offset());
         String connectorName = CONNECTOR_IDS.get(0);
-        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors()));
         assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName));
         assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0)));
         assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1)));
@@ -378,7 +378,7 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception {
                 "tasks", 1); // Starts with 2 tasks, after update has 3
 
         // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
-        configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(2)));
+        configUpdateListener.onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(2)));
         EasyMock.expectLastCall();
 
         // Records to be read by consumer as it reads to the end of the log
@@ -473,7 +473,7 @@ public void testPutTaskConfigsZeroTasks() throws Exception {
         configState = configStorage.snapshot();
         assertEquals(1, configState.offset());
         String connectorName = CONNECTOR_IDS.get(0);
-        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors()));
         assertEquals(Collections.emptyList(), configState.tasks(connectorName));
         assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
 
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java
index a0e4d569f403e..b8503ceb83945 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java
@@ -110,7 +110,7 @@ public class KafkaOffsetBackingStoreTest {
     private static final ByteBuffer TP0_VALUE_NEW = buffer("VAL0_NEW");
     private static final ByteBuffer TP1_VALUE_NEW = buffer("VAL1_NEW");
 
-    private Map props = new HashMap<>(DEFAULT_PROPS);
+    private final Map props = new HashMap<>(DEFAULT_PROPS);
     @Mock
     KafkaBasedLog storeLog;
     @Mock
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConvertingFutureCallbackTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConvertingFutureCallbackTest.java
index 7977a291df6de..b930cec34b573 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConvertingFutureCallbackTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConvertingFutureCallbackTest.java
@@ -192,9 +192,9 @@ public void shouldNotCancelIfMayNotCancelWhileRunning() throws Exception {
     }
 
     protected static class TestConvertingFutureCallback extends ConvertingFutureCallback {
-        private AtomicInteger numberOfConversions = new AtomicInteger();
-        private CountDownLatch getInvoked = new CountDownLatch(1);
-        private CountDownLatch cancelInvoked = new CountDownLatch(1);
+        private final AtomicInteger numberOfConversions = new AtomicInteger();
+        private final CountDownLatch getInvoked = new CountDownLatch(1);
+        private final CountDownLatch cancelInvoked = new CountDownLatch(1);
 
         public int numberOfConversions() {
             return numberOfConversions.get();
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java
index 0883040a33f21..9130d8badc9ba 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java
@@ -26,7 +26,7 @@ public class TestFuture implements Future {
     private volatile boolean resolved;
     private T result;
     private Throwable exception;
-    private CountDownLatch getCalledLatch;
+    private final CountDownLatch getCalledLatch;
 
     private volatile boolean resolveOnGet;
     private T resolveOnGetResult;
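Many hunks in this patch only add the final modifier to fields that are assigned exactly once. Beyond signaling intent (and satisfying checkstyle), final fields come with a Java Memory Model guarantee: their values are visible to every thread that sees the fully constructed object, with no extra synchronization. A small sketch of the idiom (illustrative only):

    import java.util.concurrent.CountDownLatch;

    public class FinalFieldDemo {
        // Assigned once at construction; the JMM guarantees any thread that sees
        // a fully constructed FinalFieldDemo also sees the initialized latch.
        private final CountDownLatch started = new CountDownLatch(1);

        public void markStarted() {
            started.countDown();
        }

        public void awaitStarted() throws InterruptedException {
            started.await(); // safe to call from another thread
        }
    }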
diff --git a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java
index 058ab9522b283..af2c4ddfba466 100644
--- a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java
+++ b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java
@@ -71,6 +71,7 @@ import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -105,12 +106,12 @@ public KafkaPrincipal deserialize(byte[] bytes) throws SerializationException {
     UpdateMetadataBroker broker = new UpdateMetadataBroker()
         .setId(0)
         .setRack("rack")
-        .setEndpoints(Arrays.asList(
-            new UpdateMetadataRequestData.UpdateMetadataEndpoint()
-                .setHost("broker0")
-                .setPort(9092)
-                .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
-                .setListener(plaintextListener.value())
+        .setEndpoints(Collections.singletonList(
+            new UpdateMetadataRequestData.UpdateMetadataEndpoint()
+                .setHost("broker0")
+                .setPort(9092)
+                .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
+                .setListener(plaintextListener.value())
         ));
 
     @Test
@@ -168,9 +169,9 @@ void testDescribeTopicPartitionsRequest() {
                 .setPartitionId(1)
                 .setReplicas(Arrays.asList(0, 1, 2))
                 .setLeader(0)
-                .setIsr(Arrays.asList(0))
-                .setEligibleLeaderReplicas(Arrays.asList(1))
-                .setLastKnownElr(Arrays.asList(2))
+                .setIsr(Collections.singletonList(0))
+                .setEligibleLeaderReplicas(Collections.singletonList(1))
+                .setLastKnownElr(Collections.singletonList(2))
                 .setLeaderEpoch(0)
                 .setPartitionEpoch(1)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()),
@@ -179,9 +180,9 @@ void testDescribeTopicPartitionsRequest() {
                 .setPartitionId(0)
                 .setReplicas(Arrays.asList(0, 1, 2))
                 .setLeader(0)
-                .setIsr(Arrays.asList(0))
-                .setEligibleLeaderReplicas(Arrays.asList(1))
-                .setLastKnownElr(Arrays.asList(2))
+                .setIsr(Collections.singletonList(0))
+                .setEligibleLeaderReplicas(Collections.singletonList(1))
+                .setLastKnownElr(Collections.singletonList(2))
                 .setLeaderEpoch(0)
                 .setPartitionEpoch(1)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()),
@@ -190,9 +191,9 @@ void testDescribeTopicPartitionsRequest() {
                 .setPartitionId(0)
                 .setReplicas(Arrays.asList(0, 1, 3))
                 .setLeader(0)
-                .setIsr(Arrays.asList(0))
-                .setEligibleLeaderReplicas(Arrays.asList(1))
-                .setLastKnownElr(Arrays.asList(3))
+                .setIsr(Collections.singletonList(0))
+                .setEligibleLeaderReplicas(Collections.singletonList(1))
+                .setLastKnownElr(Collections.singletonList(3))
                 .setLeaderEpoch(0)
                 .setPartitionEpoch(2)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value())
@@ -371,9 +372,9 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() {
                 .setPartitionId(0)
                 .setReplicas(Arrays.asList(0, 1, 2))
                 .setLeader(0)
-                .setIsr(Arrays.asList(0))
-                .setEligibleLeaderReplicas(Arrays.asList(1))
-                .setLastKnownElr(Arrays.asList(2))
+                .setIsr(Collections.singletonList(0))
+                .setEligibleLeaderReplicas(Collections.singletonList(1))
+                .setLastKnownElr(Collections.singletonList(2))
                 .setLeaderEpoch(0)
                 .setPartitionEpoch(1)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()),
@@ -382,9 +383,9 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() {
                 .setPartitionId(1)
                 .setReplicas(Arrays.asList(0, 1, 2))
                 .setLeader(0)
-                .setIsr(Arrays.asList(0))
-                .setEligibleLeaderReplicas(Arrays.asList(1))
-                .setLastKnownElr(Arrays.asList(2))
+                .setIsr(Collections.singletonList(0))
+                .setEligibleLeaderReplicas(Collections.singletonList(1))
+                .setLastKnownElr(Collections.singletonList(2))
                 .setLeaderEpoch(0)
                 .setPartitionEpoch(1)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()),
@@ -393,9 +394,9 @@
                 .setPartitionId(0)
                 .setReplicas(Arrays.asList(0, 1, 3))
                 .setLeader(0)
-                .setIsr(Arrays.asList(0))
-                .setEligibleLeaderReplicas(Arrays.asList(1))
-                .setLastKnownElr(Arrays.asList(3))
+                .setIsr(Collections.singletonList(0))
+                .setEligibleLeaderReplicas(Collections.singletonList(1))
+                .setLastKnownElr(Collections.singletonList(3))
                 .setLeaderEpoch(0)
                 .setPartitionEpoch(2)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value())
diff --git a/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java b/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java
index 8f170950365cc..297bee6380173 100644
--- a/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java
+++ b/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java
@@ -29,7 +29,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Arrays;
+import java.util.Collections;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
@@ -40,18 +40,18 @@ public class RuntimeLoggerManagerTest {
 
     @Test
     public void testValidateSetLogLevelConfig() {
-        MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
-            setName(LOG.getName()).
-            setConfigOperation(OpType.SET.id()).
-            setValue("TRACE")));
+        MANAGER.validateLogLevelConfigs(Collections.singletonList(new AlterableConfig().
+            setName(LOG.getName()).
+            setConfigOperation(OpType.SET.id()).
+            setValue("TRACE")));
     }
 
     @Test
     public void testValidateDeleteLogLevelConfig() {
-        MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
-            setName(LOG.getName()).
-            setConfigOperation(OpType.DELETE.id()).
-            setValue("")));
+        MANAGER.validateLogLevelConfigs(Collections.singletonList(new AlterableConfig().
+            setName(LOG.getName()).
+            setConfigOperation(OpType.DELETE.id()).
+            setValue("")));
     }
 
     @ParameterizedTest
@@ -60,10 +60,10 @@ public void testOperationNotAllowed(byte id) {
         OpType opType = AlterConfigOp.OpType.forId(id);
         assertEquals(opType + " operation is not allowed for the BROKER_LOGGER resource",
             Assertions.assertThrows(InvalidRequestException.class,
-                () -> MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
-                    setName(LOG.getName()).
-                    setConfigOperation(id).
-                    setValue("TRACE")))).getMessage());
+                () -> MANAGER.validateLogLevelConfigs(Collections.singletonList(new AlterableConfig().
+                    setName(LOG.getName()).
+                    setConfigOperation(id).
+                    setValue("TRACE")))).getMessage());
     }
 
     @Test
@@ -71,15 +71,15 @@ public void testValidateBogusLogLevelNameNotAllowed() {
         assertEquals("Cannot set the log level of " + LOG.getName() + " to BOGUS as it is not " +
             "a supported log level. Valid log levels are DEBUG, ERROR, FATAL, INFO, TRACE, WARN",
             Assertions.assertThrows(InvalidConfigurationException.class,
-                () -> MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
-                    setName(LOG.getName()).
-                    setConfigOperation(OpType.SET.id()).
-                    setValue("BOGUS")))).getMessage());
+                () -> MANAGER.validateLogLevelConfigs(Collections.singletonList(new AlterableConfig().
+                    setName(LOG.getName()).
+                    setConfigOperation(OpType.SET.id()).
+                    setValue("BOGUS")))).getMessage());
     }
 
     @Test
     public void testValidateSetRootLogLevelConfig() {
-        MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
+        MANAGER.validateLogLevelConfigs(Collections.singletonList(new AlterableConfig().
             setName(Log4jController.ROOT_LOGGER()).
             setConfigOperation(OpType.SET.id()).
             setValue("TRACE")));
     }
 
@@ -90,9 +90,9 @@ public void testValidateRemoveRootLogLevelConfigNotAllowed() {
         assertEquals("Removing the log level of the " + Log4jController.ROOT_LOGGER() +
             " logger is not allowed",
             Assertions.assertThrows(InvalidRequestException.class,
-                () -> MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
-                    setName(Log4jController.ROOT_LOGGER()).
-                    setConfigOperation(OpType.DELETE.id()).
-                    setValue("")))).getMessage());
+                () -> MANAGER.validateLogLevelConfigs(Collections.singletonList(new AlterableConfig().
+                    setName(Log4jController.ROOT_LOGGER()).
+                    setConfigOperation(OpType.DELETE.id()).
+                    setValue("")))).getMessage());
     }
 }
diff --git a/core/src/test/java/kafka/test/server/BootstrapControllersIntegrationTest.java b/core/src/test/java/kafka/test/server/BootstrapControllersIntegrationTest.java
index 5182293f4b7fd..e267e92a7be70 100644
--- a/core/src/test/java/kafka/test/server/BootstrapControllersIntegrationTest.java
+++ b/core/src/test/java/kafka/test/server/BootstrapControllersIntegrationTest.java
@@ -50,7 +50,6 @@ import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -220,15 +219,15 @@ public void testIncrementalAlterConfigs(boolean usingBootstrapControllers) throw
         ConfigResource nodeResource = new ConfigResource(BROKER, "" + nodeId);
         ConfigResource defaultResource = new ConfigResource(BROKER, "");
         Map> alterations = new HashMap<>();
-        alterations.put(nodeResource, Arrays.asList(
-            new AlterConfigOp(new ConfigEntry("my.custom.config", "foo"),
-                AlterConfigOp.OpType.SET)));
-        alterations.put(defaultResource, Arrays.asList(
-            new AlterConfigOp(new ConfigEntry("my.custom.config", "bar"),
-                AlterConfigOp.OpType.SET)));
+        alterations.put(nodeResource, Collections.singletonList(
+            new AlterConfigOp(new ConfigEntry("my.custom.config", "foo"),
+                AlterConfigOp.OpType.SET)));
+        alterations.put(defaultResource, Collections.singletonList(
+            new AlterConfigOp(new ConfigEntry("my.custom.config", "bar"),
+                AlterConfigOp.OpType.SET)));
         admin.incrementalAlterConfigs(alterations).all().get(1, TimeUnit.MINUTES);
         TestUtils.retryOnExceptionWithTimeout(30_000, () -> {
-            Config config = admin.describeConfigs(Arrays.asList(nodeResource)).
+            Config config = admin.describeConfigs(Collections.singletonList(nodeResource)).
                 all().get(1, TimeUnit.MINUTES).get(nodeResource);
             ConfigEntry entry = config.entries().stream().
                 filter(e -> e.name().equals("my.custom.config")).
diff --git a/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java b/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java
index 4d34ce040142b..c7c83bda8d982 100644
--- a/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java
+++ b/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java
@@ -147,9 +147,9 @@ public FaultHandler build(String name, boolean fatal, Runnable action) {
     }
 
     public static class Builder {
-        private TestKitNodes nodes;
-        private Map configProps = new HashMap<>();
-        private SimpleFaultHandlerFactory faultHandlerFactory = new SimpleFaultHandlerFactory();
+        private final TestKitNodes nodes;
+        private final Map configProps = new HashMap<>();
+        private final SimpleFaultHandlerFactory faultHandlerFactory = new SimpleFaultHandlerFactory();
 
         public Builder(TestKitNodes nodes) {
             this.nodes = nodes;
@@ -473,7 +473,7 @@ public String quorumVotersConfig() throws ExecutionException, InterruptedExcepti
     }
 
     public class ClientPropertiesBuilder {
-        private Properties properties;
+        private final Properties properties;
         private boolean usingBootstrapControllers = false;
 
         public ClientPropertiesBuilder() {
diff --git a/examples/src/main/java/kafka/examples/Utils.java b/examples/src/main/java/kafka/examples/Utils.java
index 8846879f67d5f..d0b7734becfba 100644
--- a/examples/src/main/java/kafka/examples/Utils.java
+++ b/examples/src/main/java/kafka/examples/Utils.java
@@ -39,7 +39,7 @@ private Utils() {
     }
 
     public static void printHelp(String message, Object... args) {
-        System.out.println(format(message, args));
+        System.out.printf(message + "%n", args);
     }
 
     public static void printOut(String message, Object... args) {
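The printHelp change above folds String.format plus println into a single printf; the trailing "%n" is what preserves the old behavior, since %n expands to the platform line separator just as println does. A quick sketch of the equivalence (standalone, illustrative only):

    import static java.lang.String.format;

    public class PrintfDemo {
        public static void main(String[] args) {
            String message = "Topic %s has %d partitions";
            // Before: format the message, then println appends the line separator
            System.out.println(format(message, "orders", 3));
            // After: printf formats in one step; "%n" emits the same separator
            System.out.printf(message + "%n", "orders", 3);
            // Both print: Topic orders has 3 partitions
        }
    }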
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java
index 058cdf206db60..a7b65c6812e8d 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java
@@ -9695,9 +9695,9 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw
                 .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(
                     Arrays.asList(fooTopicName, barTopicName),
                     null,
-                    Arrays.asList(
-                        new TopicPartition(barTopicName, 0)
-                    )
+                    Collections.singletonList(
+                        new TopicPartition(barTopicName, 0)
+                    )
                 ))))
         );
 
@@ -9712,8 +9712,8 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw
             );
             put(
                 memberId2,
-                Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(
-                    new TopicPartition(barTopicName, 0)
+                Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Collections.singletonList(
+                    new TopicPartition(barTopicName, 0)
                 ))))
             );
         }
@@ -9942,9 +9942,9 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro
                 .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(
                     Arrays.asList(fooTopicName, barTopicName),
                     null,
-                    Arrays.asList(
-                        new TopicPartition(barTopicName, 0)
-                    )
+                    Collections.singletonList(
+                        new TopicPartition(barTopicName, 0)
+                    )
                 ))))
         );
 
@@ -9959,8 +9959,8 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro
             );
             put(
                 memberId2,
-                Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(
-                    new TopicPartition(barTopicName, 0)
+                Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Collections.singletonList(
+                    new TopicPartition(barTopicName, 0)
                 ))))
             );
         }
@@ -10754,7 +10754,7 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() {
                     .setPartitions(Arrays.asList(3, 4, 5)),
                 new ConsumerGroupHeartbeatRequestData.TopicPartitions()
                     .setTopicId(barTopicId)
-                    .setPartitions(Arrays.asList(2))
+                    .setPartitions(Collections.singletonList(2))
             ))
         );
 
@@ -12261,9 +12261,9 @@ public void testClassicGroupSyncToConsumerGroupWithInconsistentGroupProtocol() t
                 .setName("range")
                 .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(
                     new ConsumerPartitionAssignor.Subscription(
-                        Arrays.asList("foo"),
-                        null,
-                        Collections.emptyList()
+                        Collections.singletonList("foo"),
+                        null,
+                        Collections.emptyList()
                     )
                 )))
         );
@@ -12326,9 +12326,9 @@ public void testClassicGroupSyncToConsumerGroupWithIllegalGeneration() throws Ex
                 .setName("range")
                 .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(
                     new ConsumerPartitionAssignor.Subscription(
-                        Arrays.asList("foo"),
-                        null,
-                        Collections.emptyList()
+                        Collections.singletonList("foo"),
+                        null,
+                        Collections.emptyList()
                     )
                 )))
         );
@@ -12369,9 +12369,9 @@ public void testClassicGroupSyncToConsumerGroupRebalanceInProgress() throws Exce
                 .setName("range")
                 .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(
                     new ConsumerPartitionAssignor.Subscription(
-                        Arrays.asList("foo"),
-                        null,
-                        Collections.emptyList()
+                        Collections.singletonList("foo"),
+                        null,
+                        Collections.emptyList()
                     )
                 )))
         );
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java
index 995f1ee74a50b..2f6aacccebabc 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java
@@ -27,7 +27,7 @@ import java.util.Arrays;
 
 public class MetadataImageBuilder {
-    private MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY);
+    private final MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY);
 
     public MetadataImageBuilder addTopic(
         Uuid topicId,
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java
index e2684a7cab0ae..3c1dbbf1e0850 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java
@@ -101,7 +101,7 @@ public static class Builder {
         private GroupMetadataManager groupMetadataManager = null;
         private MetadataImage metadataImage = null;
         private GroupCoordinatorConfig config = null;
-        private GroupCoordinatorMetricsShard metrics = mock(GroupCoordinatorMetricsShard.class);
+        private final GroupCoordinatorMetricsShard metrics = mock(GroupCoordinatorMetricsShard.class);
 
         Builder withOffsetMetadataMaxSize(int offsetMetadataMaxSize) {
             config = GroupCoordinatorConfigTest.createGroupCoordinatorConfig(offsetMetadataMaxSize, 60000L, 24 * 60 * 1000);
@@ -2039,30 +2039,30 @@ public void testFetchAllOffsetsAtDifferentCommittedOffset() {
         assertEquals(Collections.emptyList(), context.fetchAllOffsets("group", 0L));
 
         // Fetching with 1 should return data up to offset 1.
-        assertEquals(Arrays.asList(
-            new OffsetFetchResponseData.OffsetFetchResponseTopics()
-                .setName("foo")
-                .setPartitions(Arrays.asList(
-                    mkOffsetPartitionResponse(0, 100L, 1, "metadata")
-                ))
+        assertEquals(Collections.singletonList(
+            new OffsetFetchResponseData.OffsetFetchResponseTopics()
+                .setName("foo")
+                .setPartitions(Collections.singletonList(
+                    mkOffsetPartitionResponse(0, 100L, 1, "metadata")
+                ))
         ), context.fetchAllOffsets("group", 1L));
 
         // Fetching with 2 should return data up to offset 2.
-        assertEquals(Arrays.asList(
-            new OffsetFetchResponseData.OffsetFetchResponseTopics()
-                .setName("foo")
-                .setPartitions(Arrays.asList(
-                    mkOffsetPartitionResponse(0, 100L, 1, "metadata"),
-                    mkOffsetPartitionResponse(1, 110L, 1, "metadata")
-                ))
+        assertEquals(Collections.singletonList(
+            new OffsetFetchResponseData.OffsetFetchResponseTopics()
+                .setName("foo")
+                .setPartitions(Arrays.asList(
+                    mkOffsetPartitionResponse(0, 100L, 1, "metadata"),
+                    mkOffsetPartitionResponse(1, 110L, 1, "metadata")
+                ))
         ), context.fetchAllOffsets("group", 2L));
 
         // Fetching with 3 should return data up to offset 3.
         assertEquals(Arrays.asList(
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("bar")
-                .setPartitions(Arrays.asList(
-                    mkOffsetPartitionResponse(0, 200L, 1, "metadata")
+                .setPartitions(Collections.singletonList(
+                    mkOffsetPartitionResponse(0, 200L, 1, "metadata")
                 )),
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("foo")
@@ -2076,8 +2076,8 @@ public void testFetchAllOffsetsAtDifferentCommittedOffset() {
         assertEquals(Arrays.asList(
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("bar")
-                .setPartitions(Arrays.asList(
-                    mkOffsetPartitionResponse(0, 200L, 1, "metadata")
+                .setPartitions(Collections.singletonList(
+                    mkOffsetPartitionResponse(0, 200L, 1, "metadata")
                 )),
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("foo")
@@ -2130,8 +2130,8 @@ public void testFetchAllOffsetsWithPendingTransactionalOffsets() {
         assertEquals(Arrays.asList(
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("bar")
-                .setPartitions(Arrays.asList(
-                    mkOffsetPartitionResponse(0, Errors.UNSTABLE_OFFSET_COMMIT)
+                .setPartitions(Collections.singletonList(
+                    mkOffsetPartitionResponse(0, Errors.UNSTABLE_OFFSET_COMMIT)
                 )),
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("foo")
@@ -2146,8 +2146,8 @@ public void testFetchAllOffsetsWithPendingTransactionalOffsets() {
         assertEquals(Arrays.asList(
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("bar")
-                .setPartitions(Arrays.asList(
-                    mkOffsetPartitionResponse(0, 200L, 1, "metadata")
+                .setPartitions(Collections.singletonList(
+                    mkOffsetPartitionResponse(0, 200L, 1, "metadata")
                 )),
             new OffsetFetchResponseData.OffsetFetchResponseTopics()
                 .setName("foo")
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java
index 1c53445babe57..900eef3344470 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java
@@ -175,7 +175,7 @@ public void testUpdateMember() {
             .maybeUpdateRackId(Optional.of("new-rack-id"))
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java
index 1c53445babe57..900eef3344470 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/consumer/ConsumerGroupMemberTest.java
@@ -175,7 +175,7 @@ public void testUpdateMember() {
             .maybeUpdateRackId(Optional.of("new-rack-id"))
             .maybeUpdateInstanceId(Optional.of("new-instance-id"))
             .maybeUpdateServerAssignorName(Optional.of("new-assignor"))
-            .maybeUpdateSubscribedTopicNames(Optional.of(Arrays.asList("zar")))
+            .maybeUpdateSubscribedTopicNames(Optional.of(Collections.singletonList("zar")))
             .maybeUpdateSubscribedTopicRegex(Optional.of("new-regex"))
             .maybeUpdateRebalanceTimeoutMs(OptionalInt.of(6000))
             .build();
@@ -183,7 +183,7 @@ public void testUpdateMember() {
         assertEquals("new-instance-id", updatedMember.instanceId());
         assertEquals("new-rack-id", updatedMember.rackId());
         // Names are sorted.
-        assertEquals(Arrays.asList("zar"), updatedMember.subscribedTopicNames());
+        assertEquals(Collections.singletonList("zar"), updatedMember.subscribedTopicNames());
         assertEquals("new-regex", updatedMember.subscribedTopicRegex());
         assertEquals("new-assignor", updatedMember.serverAssignorName().get());
     }
@@ -356,11 +356,11 @@ public void testAsConsumerGroupDescribeWithTopicNameNotFound() {
 
     @Test
     public void testClassicProtocolListFromJoinRequestProtocolCollection() {
         JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestData.JoinGroupRequestProtocolCollection();
-        protocols.addAll(Arrays.asList(
-            new JoinGroupRequestData.JoinGroupRequestProtocol()
-                .setName("range")
-                .setMetadata(new byte[]{1, 2, 3})
-        ));
+        protocols.add(
+            new JoinGroupRequestData.JoinGroupRequestProtocol()
+                .setName("range")
+                .setMetadata(new byte[]{1, 2, 3})
+        );
 
         assertEquals(
             toClassicProtocolCollection("range"),
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntimeTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntimeTest.java
index 1be63c58ccfb0..a9cfb07a9a6c3 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntimeTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntimeTest.java
@@ -145,7 +145,7 @@ public void close() throws Exception {}
      * when poll() is called.
      */
     private static class ManualEventProcessor implements CoordinatorEventProcessor {
-        private Deque queue = new LinkedList<>();
+        private final Deque queue = new LinkedList<>();
 
         @Override
         public void enqueueLast(CoordinatorEvent event) throws RejectedExecutionException {
@@ -984,13 +984,13 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio
         // Records have been replayed to the coordinator.
         assertEquals(mkSet("record1", "record2"), ctx.coordinator.coordinator().records());
         // Records have been written to the log.
-        assertEquals(Arrays.asList(
-            records(timer.time().milliseconds(), "record1", "record2")
+        assertEquals(Collections.singletonList(
+            records(timer.time().milliseconds(), "record1", "record2")
         ), writer.entries(TP));
 
         // Write #2.
         CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
-            state -> new CoordinatorResult<>(Arrays.asList("record3"), "response2"));
+            state -> new CoordinatorResult<>(Collections.singletonList("record3"), "response2"));
 
         // Verify that the write is not committed yet.
         assertFalse(write2.isDone());
@@ -1538,8 +1538,8 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E
             100L
         ));
 
         // Records have been written to the log.
-        assertEquals(Arrays.asList(
-            transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
+        assertEquals(Collections.singletonList(
+            transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
         ), writer.entries(TP));
 
         // Complete transaction #1.
@@ -1783,8 +1783,8 @@ public void replayEndTransactionMarker(
         assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
         assertEquals(mkSet("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
         assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records());
-        assertEquals(Arrays.asList(
-            transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
+        assertEquals(Collections.singletonList(
+            transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
         ), writer.entries(TP));
 
         // Complete transaction #1. It should fail.
@@ -1805,8 +1805,8 @@ public void replayEndTransactionMarker(
         assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList());
         assertEquals(mkSet("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L));
         assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records());
-        assertEquals(Arrays.asList(
-            transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
+        assertEquals(Collections.singletonList(
+            transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2")
         ), writer.entries(TP));
     }
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/InMemoryPartitionWriter.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/InMemoryPartitionWriter.java
index adcf0fbe13706..cff65269c26db 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/InMemoryPartitionWriter.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/InMemoryPartitionWriter.java
@@ -38,9 +38,9 @@ public class InMemoryPartitionWriter implements PartitionWriter {
 
     private class PartitionState {
-        private ReentrantLock lock = new ReentrantLock();
-        private List listeners = new ArrayList<>();
-        private List entries = new ArrayList<>();
+        private final ReentrantLock lock = new ReentrantLock();
+        private final List listeners = new ArrayList<>();
+        private final List entries = new ArrayList<>();
         private long endOffset = 0L;
         private long committedOffset = 0L;
     }
diff --git a/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java
index eb64a85bd8869..18ac97901cb0a 100644
--- a/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java
+++ b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java
@@ -45,7 +45,7 @@
 
 public class KafkaLog4jAppenderTest {
-    private Logger logger = Logger.getLogger(KafkaLog4jAppenderTest.class);
+    private final Logger logger = Logger.getLogger(KafkaLog4jAppenderTest.class);
 
     @BeforeEach
     public void setup() {
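The other recurring cleanup marks fields that are assigned exactly once as final, as in MetadataImageBuilder, ManualEventProcessor, InMemoryPartitionWriter, and KafkaLog4jAppenderTest above. A short sketch of why this is a free win (hypothetical class, not from the patch):

    import java.util.concurrent.atomic.AtomicLong;

    public class Counter {
        // 'final' documents single assignment, lets the compiler reject accidental
        // reassignment, and gives the field safe-publication semantics under the
        // Java Memory Model when the object is shared across threads.
        private final AtomicLong count = new AtomicLong();

        public long increment() {
            return count.incrementAndGet();
        }
    }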
diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java
index f0bd98776bc38..6263094f2b7ff 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java
@@ -257,7 +257,7 @@ boolean check() {
      */
     private final boolean zkMigrationEnabled;
 
-    private BrokerUncleanShutdownHandler brokerUncleanShutdownHandler;
+    private final BrokerUncleanShutdownHandler brokerUncleanShutdownHandler;
 
     /**
      * Maps controller IDs to controller registrations.
diff --git a/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java b/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java
index 0d2c1bd6a9d58..86d7a0f1494f6 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java
@@ -58,8 +58,7 @@ public static boolean changeRecordIsNoOp(PartitionChangeRecord record) {
         if (record.removingReplicas() != null) return false;
         if (record.addingReplicas() != null) return false;
         if (record.leaderRecoveryState() != LeaderRecoveryState.NO_CHANGE) return false;
-        if (record.directories() != null) return false;
-        return true;
+        return record.directories() == null;
     }
 
     /**
@@ -515,7 +514,7 @@ private void maybeUpdateLastKnownLeader(PartitionChangeRecord record) {
         if (record.isr() != null && record.isr().isEmpty() &&
             (partition.lastKnownElr.length != 1 || partition.lastKnownElr[0] != partition.leader)) {
             // Only update the last known leader when the first time the partition becomes leaderless.
-            record.setLastKnownElr(Arrays.asList(partition.leader));
+            record.setLastKnownElr(Collections.singletonList(partition.leader));
         } else if ((record.leader() >= 0 || (partition.leader != NO_LEADER && record.leader() != NO_LEADER))
             && partition.lastKnownElr.length > 0) {
             // Clear the LastKnownElr field if the partition will have or continues to have a valid leader.
diff --git a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java
index 9d186d83d3ff4..cb2db1858b48c 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java
@@ -130,7 +130,6 @@
 import org.slf4j.Logger;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -1405,7 +1404,7 @@ private void maybeScheduleNextWriteNoOpRecord() {
                 maybeScheduleNextWriteNoOpRecord();
 
                 return ControllerResult.of(
-                    Arrays.asList(new ApiMessageAndVersion(new NoOpRecord(), (short) 0)),
+                    Collections.singletonList(new ApiMessageAndVersion(new NoOpRecord(), (short) 0)),
                     null
                 );
             },
diff --git a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
index 9b412ad105da5..c34293f35b38d 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
@@ -1904,7 +1904,7 @@ void generateLeaderAndIsrUpdates(String context,
                 builder.setElection(PartitionChangeBuilder.Election.UNCLEAN);
             }
             if (brokerWithUncleanShutdown != NO_LEADER) {
-                builder.setUncleanShutdownReplicas(Arrays.asList(brokerWithUncleanShutdown));
+                builder.setUncleanShutdownReplicas(Collections.singletonList(brokerWithUncleanShutdown));
             }
 
             // Note: if brokerToRemove and brokerWithUncleanShutdown were passed as NO_LEADER, this is a no-op (the new
@@ -2077,7 +2077,7 @@ Optional changePartitionReassignment(TopicIdPartition tp,
             tp.partitionId(),
             new LeaderAcceptor(clusterControl, part),
             featureControl.metadataVersion(),
-            getTopicEffectiveMinIsr(topics.get(tp.topicId()).name.toString())
+            getTopicEffectiveMinIsr(topics.get(tp.topicId()).name)
         );
         builder.setZkMigrationEnabled(clusterControl.zkRegistrationAllowed());
         builder.setEligibleLeaderReplicasEnabled(isElrEnabled());
diff --git a/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java b/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java
index b7e74446a4b6b..3c7427493833e 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/errors/ControllerExceptions.java
@@ -37,8 +37,7 @@ public static boolean isTimeoutException(Throwable exception) {
             exception = exception.getCause();
             if (exception == null) return false;
         }
-        if (!(exception instanceof TimeoutException)) return false;
-        return true;
+        return exception instanceof TimeoutException;
     }
 
     /**
@@ -53,8 +52,7 @@ public static boolean isNotControllerException(Throwable exception) {
             exception = exception.getCause();
             if (exception == null) return false;
         }
-        if (!(exception instanceof NotControllerException)) return false;
-        return true;
+        return exception instanceof NotControllerException;
     }
 
     /**
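ControllerExceptions above (and EventHandlerExceptionInfo below) drop the 'if (!cond) return false; return true;' idiom in favor of returning the condition directly. A minimal before/after sketch (names are illustrative):

    import java.util.concurrent.TimeoutException;

    public class BooleanReturns {
        // Before: negate, branch, then return a constant.
        static boolean isTimeoutVerbose(Throwable t) {
            if (!(t instanceof TimeoutException)) return false;
            return true;
        }

        // After: same truth table, one expression.
        static boolean isTimeout(Throwable t) {
            return t instanceof TimeoutException;
        }

        public static void main(String[] args) {
            System.out.println(isTimeoutVerbose(new TimeoutException())); // true
            System.out.println(isTimeout(new RuntimeException()));        // false
        }
    }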
diff --git a/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java b/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java
index 4c95a553b109c..09848d0c2eef4 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/errors/EventHandlerExceptionInfo.java
@@ -116,8 +116,7 @@ static boolean exceptionClassesAndMessagesMatch(Throwable a, Throwable b) {
         if (a == null) return b == null;
         if (b == null) return false;
         if (!a.getClass().equals(b.getClass())) return false;
-        if (!Objects.equals(a.getMessage(), b.getMessage())) return false;
-        return true;
+        return Objects.equals(a.getMessage(), b.getMessage());
     }
 
     EventHandlerExceptionInfo(
diff --git a/metadata/src/main/java/org/apache/kafka/image/AclsDelta.java b/metadata/src/main/java/org/apache/kafka/image/AclsDelta.java
index 3a38d52aca3ab..7d61439500bc7 100644
--- a/metadata/src/main/java/org/apache/kafka/image/AclsDelta.java
+++ b/metadata/src/main/java/org/apache/kafka/image/AclsDelta.java
@@ -113,7 +113,7 @@ public AclsImage apply() {
     public String toString() {
         return "AclsDelta(" +
             ", changes=" + changes.entrySet().stream().
-                map(e -> "" + e.getKey() + "=" + e.getValue()).
+                map(e -> e.getKey() + "=" + e.getValue()).
                 collect(Collectors.joining(", ")) + ")";
     }
 }
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java
index bb9152022920e..896ba5b4e6b84 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java
@@ -366,26 +366,24 @@ public boolean equals(Object o) {
 
     @Override
     public String toString() {
-        StringBuilder bld = new StringBuilder();
-        bld.append("BrokerRegistration(id=").append(id);
-        bld.append(", epoch=").append(epoch);
-        bld.append(", incarnationId=").append(incarnationId);
-        bld.append(", listeners=[").append(
-            listeners.keySet().stream().sorted().
-                map(n -> listeners.get(n).toString()).
-                collect(Collectors.joining(", ")));
-        bld.append("], supportedFeatures={").append(
-            supportedFeatures.keySet().stream().sorted().
-                map(k -> k + ": " + supportedFeatures.get(k)).
-                collect(Collectors.joining(", ")));
-        bld.append("}");
-        bld.append(", rack=").append(rack);
-        bld.append(", fenced=").append(fenced);
-        bld.append(", inControlledShutdown=").append(inControlledShutdown);
-        bld.append(", isMigratingZkBroker=").append(isMigratingZkBroker);
-        bld.append(", directories=").append(directories);
-        bld.append(")");
-        return bld.toString();
+        return "BrokerRegistration(id=" + id +
+            ", epoch=" + epoch +
+            ", incarnationId=" + incarnationId +
+            ", listeners=[" +
+            listeners.keySet().stream().sorted().
+                map(n -> listeners.get(n).toString()).
+                collect(Collectors.joining(", ")) +
+            "], supportedFeatures={" +
+            supportedFeatures.keySet().stream().sorted().
+                map(k -> k + ": " + supportedFeatures.get(k)).
+                collect(Collectors.joining(", ")) +
+            "}" +
+            ", rack=" + rack +
+            ", fenced=" + fenced +
+            ", inControlledShutdown=" + inControlledShutdown +
+            ", isMigratingZkBroker=" + isMigratingZkBroker +
+            ", directories=" + directories +
+            ")";
     }
 
     public BrokerRegistration cloneWith(
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java
index c26880bfd15bb..a6b3d13bea267 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java
@@ -214,20 +214,18 @@ public boolean equals(Object o) {
 
     @Override
     public String toString() {
-        StringBuilder bld = new StringBuilder();
-        bld.append("ControllerRegistration(id=").append(id);
-        bld.append(", incarnationId=").append(incarnationId);
-        bld.append(", zkMigrationReady=").append(zkMigrationReady);
-        bld.append(", listeners=[").append(
-            listeners.keySet().stream().sorted().
-                map(n -> listeners.get(n).toString()).
-                collect(Collectors.joining(", ")));
-        bld.append("], supportedFeatures={").append(
-            supportedFeatures.keySet().stream().sorted().
-                map(k -> k + ": " + supportedFeatures.get(k)).
-                collect(Collectors.joining(", ")));
-        bld.append("}");
-        bld.append(")");
-        return bld.toString();
+        return "ControllerRegistration(id=" + id +
+            ", incarnationId=" + incarnationId +
+            ", zkMigrationReady=" + zkMigrationReady +
+            ", listeners=[" +
+            listeners.keySet().stream().sorted().
+                map(n -> listeners.get(n).toString()).
+                collect(Collectors.joining(", ")) +
+            "], supportedFeatures={" +
+            supportedFeatures.keySet().stream().sorted().
+                map(k -> k + ": " + supportedFeatures.get(k)).
+                collect(Collectors.joining(", ")) +
+            "}" +
+            ")";
     }
 }
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java
index 05ef45d1e9667..88bb688f6b61f 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java
@@ -66,11 +66,9 @@ public boolean equals(Object o) {
 
     @Override
     public String toString() {
-        StringBuilder bld = new StringBuilder();
-        bld.append("{");
-        bld.append("featureMap=").append(featureMap.toString());
-        bld.append(", epoch=").append(epoch);
-        bld.append("}");
-        return bld.toString();
+        return "{" +
+            "featureMap=" + featureMap.toString() +
+            ", epoch=" + epoch +
+            "}";
     }
 }
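The toString() rewrites above replace hand-rolled StringBuilder chains with a single concatenation expression. This is purely a readability change: javac compiles a chain of '+' into a StringBuilder (or an invokedynamic string concat on newer JDKs), so the generated code is equivalent. A compact sketch of the target shape (hypothetical class):

    public class Endpoint {
        private final String host;
        private final int port;

        public Endpoint(String host, int port) {
            this.host = host;
            this.port = port;
        }

        @Override
        public String toString() {
            // One expression; the compiler emits the efficient builder form.
            return "Endpoint(host=" + host +
                ", port=" + port +
                ")";
        }

        public static void main(String[] args) {
            System.out.println(new Endpoint("localhost", 9092));
        }
    }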
builder.append(", lastKnownElr=").append(Arrays.toString(lastKnownElr)); - builder.append(", leader=").append(leader); - builder.append(", leaderRecoveryState=").append(leaderRecoveryState); - builder.append(", leaderEpoch=").append(leaderEpoch); - builder.append(", partitionEpoch=").append(partitionEpoch); - builder.append(")"); - return builder.toString(); + return "PartitionRegistration(" + "replicas=" + Arrays.toString(replicas) + + ", directories=" + Arrays.toString(directories) + + ", isr=" + Arrays.toString(isr) + + ", removingReplicas=" + Arrays.toString(removingReplicas) + + ", addingReplicas=" + Arrays.toString(addingReplicas) + + ", elr=" + Arrays.toString(elr) + + ", lastKnownElr=" + Arrays.toString(lastKnownElr) + + ", leader=" + leader + + ", leaderRecoveryState=" + leaderRecoveryState + + ", leaderEpoch=" + leaderEpoch + + ", partitionEpoch=" + partitionEpoch + + ")"; } public boolean hasSameAssignment(PartitionRegistration registration) { diff --git a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java index dd6c2d1518524..f9e9bd54f3367 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java @@ -318,16 +318,16 @@ public void testDeleteDedupe() { AclBinding aclBinding = new AclBinding(new ResourcePattern(TOPIC, "topic-1", LITERAL), new AccessControlEntry("User:user", "10.0.0.1", AclOperation.ALL, ALLOW)); - ControllerResult> createResult = manager.createAcls(Arrays.asList(aclBinding)); + ControllerResult> createResult = manager.createAcls(Collections.singletonList(aclBinding)); Uuid id = ((AccessControlEntryRecord) createResult.records().get(0).message()).id(); assertEquals(1, createResult.records().size()); - ControllerResult> deleteAclResultsAnyFilter = manager.deleteAcls(Arrays.asList(AclBindingFilter.ANY)); + ControllerResult> deleteAclResultsAnyFilter = manager.deleteAcls(Collections.singletonList(AclBindingFilter.ANY)); assertEquals(1, deleteAclResultsAnyFilter.records().size()); assertEquals(id, ((RemoveAccessControlEntryRecord) deleteAclResultsAnyFilter.records().get(0).message()).id()); assertEquals(1, deleteAclResultsAnyFilter.response().size()); - ControllerResult> deleteAclResultsSpecificFilter = manager.deleteAcls(Arrays.asList(aclBinding.toFilter())); + ControllerResult> deleteAclResultsSpecificFilter = manager.deleteAcls(Collections.singletonList(aclBinding.toFilter())); assertEquals(1, deleteAclResultsSpecificFilter.records().size()); assertEquals(id, ((RemoveAccessControlEntryRecord) deleteAclResultsSpecificFilter.records().get(0).message()).id()); assertEquals(1, deleteAclResultsSpecificFilter.response().size()); diff --git a/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java index a9d4cbfc6f60f..e647cd597d9d6 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClientQuotaControlManagerTest.java @@ -228,20 +228,20 @@ public void testEntityTypes() throws Exception { new EntityData().setEntityType("user").setEntityName("user-3"), new EntityData().setEntityType("client-id").setEntityName(null))). 
setKey("request_percentage").setValue(55.55).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName("user-1"))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName("user-1"))). setKey("request_percentage").setValue(56.56).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName("user-2"))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName("user-2"))). setKey("request_percentage").setValue(57.57).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName("user-3"))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName("user-3"))). setKey("request_percentage").setValue(58.58).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("user").setEntityName(null))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("user").setEntityName(null))). setKey("request_percentage").setValue(59.59).setRemove(false), (short) 0), - new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Arrays.asList( - new EntityData().setEntityType("client-id").setEntityName("client-id-2"))). + new ApiMessageAndVersion(new ClientQuotaRecord().setEntity(Collections.singletonList( + new EntityData().setEntityType("client-id").setEntityName("client-id-2"))). 
setKey("request_percentage").setValue(60.60).setRemove(false), (short) 0)); records = new ArrayList<>(records); RecordTestUtils.deepSortRecords(records); @@ -323,7 +323,7 @@ public void testIsValidIpEntityWithLocalhost() { @Test public void testConfigKeysForEntityTypeWithUser() { - testConfigKeysForEntityType(Arrays.asList(ClientQuotaEntity.USER), + testConfigKeysForEntityType(Collections.singletonList(ClientQuotaEntity.USER), Arrays.asList( "producer_byte_rate", "consumer_byte_rate", @@ -334,7 +334,7 @@ public void testConfigKeysForEntityTypeWithUser() { @Test public void testConfigKeysForEntityTypeWithClientId() { - testConfigKeysForEntityType(Arrays.asList(ClientQuotaEntity.CLIENT_ID), + testConfigKeysForEntityType(Collections.singletonList(ClientQuotaEntity.CLIENT_ID), Arrays.asList( "producer_byte_rate", "consumer_byte_rate", @@ -356,10 +356,10 @@ public void testConfigKeysForEntityTypeWithUserAndClientId() { @Test public void testConfigKeysForEntityTypeWithIp() { - testConfigKeysForEntityType(Arrays.asList(ClientQuotaEntity.IP), - Arrays.asList( - "connection_creation_rate" - )); + testConfigKeysForEntityType(Collections.singletonList(ClientQuotaEntity.IP), + Collections.singletonList( + "connection_creation_rate" + )); } private static Map keysToEntity(List entityKeys) { @@ -386,7 +386,7 @@ private static void testConfigKeysForEntityType( @Test public void testConfigKeysForEmptyEntity() { - testConfigKeysError(Arrays.asList(), + testConfigKeysError(Collections.emptyList(), new ApiError(Errors.INVALID_REQUEST, "Invalid empty client quota entity")); } @@ -427,7 +427,7 @@ private static void testConfigKeysError( static { VALID_CLIENT_ID_QUOTA_KEYS = new HashMap<>(); assertEquals(ApiError.NONE, ClientQuotaControlManager.configKeysForEntityType( - keysToEntity(Arrays.asList(ClientQuotaEntity.CLIENT_ID)), VALID_CLIENT_ID_QUOTA_KEYS)); + keysToEntity(Collections.singletonList(ClientQuotaEntity.CLIENT_ID)), VALID_CLIENT_ID_QUOTA_KEYS)); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java index 20c2b7c690997..e2bfad53bc101 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java @@ -327,19 +327,19 @@ public void testRegisterBrokerRecordVersion(MetadataVersion metadataVersion) { short expectedVersion = metadataVersion.registerBrokerRecordVersion(); assertEquals( - asList(new ApiMessageAndVersion(new RegisterBrokerRecord(). - setBrokerEpoch(123L). - setBrokerId(0). - setRack(null). - setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")). - setFenced(true). - setLogDirs(logDirs). - setFeatures(new RegisterBrokerRecord.BrokerFeatureCollection(asList( - new RegisterBrokerRecord.BrokerFeature(). - setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion((short) 1). - setMaxSupportedVersion((short) 1)).iterator())). - setInControlledShutdown(false), expectedVersion)), + Collections.singletonList(new ApiMessageAndVersion(new RegisterBrokerRecord(). + setBrokerEpoch(123L). + setBrokerId(0). + setRack(null). + setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")). + setFenced(true). + setLogDirs(logDirs). + setFeatures(new RegisterBrokerRecord.BrokerFeatureCollection(Collections.singletonList( + new RegisterBrokerRecord.BrokerFeature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion((short) 1). 
+ setMaxSupportedVersion((short) 1)).iterator())). + setInControlledShutdown(false), expectedVersion)), result.records()); } @@ -673,7 +673,7 @@ public void testDefaultDir() { RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(1).setLogDirs(Collections.emptyList()); brokerRecord.endPoints().add(new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).setName("PLAINTEXT").setHost("127.0.0.1")); clusterControl.replay(brokerRecord, 100L); - registerNewBrokerWithDirs(clusterControl, 2, asList(Uuid.fromString("singleOnlineDirectoryA"))); + registerNewBrokerWithDirs(clusterControl, 2, Collections.singletonList(Uuid.fromString("singleOnlineDirectoryA"))); registerNewBrokerWithDirs(clusterControl, 3, asList(Uuid.fromString("s4fRmyNFSH6J0vI8AVA5ew"), Uuid.fromString("UbtxBcqYSnKUEMcnTyZFWw"))); assertEquals(DirectoryId.MIGRATING, clusterControl.defaultDir(1)); assertEquals(Uuid.fromString("singleOnlineDirectoryA"), clusterControl.defaultDir(2)); diff --git a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java index 5e76114d2e975..b24848147878d 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java @@ -33,7 +33,6 @@ import org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata; import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -57,7 +56,7 @@ import static org.apache.kafka.common.metadata.MetadataRecordType.CONFIG_RECORD; import static org.apache.kafka.server.config.ConfigSynonym.HOURS_TO_MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNull; @Timeout(value = 40) @@ -80,9 +79,9 @@ public class ConfigurationControlManagerTest { public static final Map> SYNONYMS = new HashMap<>(); static { - SYNONYMS.put("abc", Arrays.asList(new ConfigSynonym("foo.bar"))); - SYNONYMS.put("def", Arrays.asList(new ConfigSynonym("baz"))); - SYNONYMS.put("quuux", Arrays.asList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); + SYNONYMS.put("abc", Collections.singletonList(new ConfigSynonym("foo.bar"))); + SYNONYMS.put("def", Collections.singletonList(new ConfigSynonym("baz"))); + SYNONYMS.put("quuux", Collections.singletonList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); } static final KafkaConfigSchema SCHEMA = new KafkaConfigSchema(CONFIGS, SYNONYMS); @@ -138,7 +137,7 @@ public void testReplay() throws Exception { assertEquals(toMap(entry("abc", "x,y,z"), entry("def", "blah")), manager.getConfigs(MYTOPIC)); assertEquals("x,y,z", manager.getTopicConfig(MYTOPIC.name(), "abc")); - assertTrue(manager.getTopicConfig(MYTOPIC.name(), "none-exists") == null); + assertNull(manager.getTopicConfig(MYTOPIC.name(), "none-exists")); } @Test @@ -382,14 +381,14 @@ expectedRecords1, toMap(entry(MYTOPIC, ApiError.NONE))), for (ApiMessageAndVersion message : expectedRecords1) { manager.replay((ConfigRecord) message.message()); } - assertEquals(ControllerResult.atomicOf(asList( - new ApiMessageAndVersion( - new ConfigRecord() - .setResourceType(TOPIC.id()) - .setResourceName("mytopic") - .setName("abc") - .setValue(null), - 
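One caveat worth keeping in mind while reviewing these test hunks: Collections.singletonList is fully immutable, so the swap is only mechanical where the list is read, as in the assertions and record constructions above. A call site that needs to grow or mutate the list should copy instead; a short sketch:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class MutationSafety {
        public static void main(String[] args) {
            // Safe swap: the list is only read (e.g. passed to an assertion).
            List<String> readOnly = Collections.singletonList("foo");
            System.out.println(readOnly.size()); // 1

            // If the callee must add elements, wrap the singleton in a mutable copy.
            List<String> growable = new ArrayList<>(Collections.singletonList("foo"));
            growable.add("bar");
            System.out.println(growable); // [foo, bar]
        }
    }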
diff --git a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java
index 5e76114d2e975..b24848147878d 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java
@@ -33,7 +33,6 @@
 import org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata;
 
 import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
@@ -57,7 +56,7 @@
 import static org.apache.kafka.common.metadata.MetadataRecordType.CONFIG_RECORD;
 import static org.apache.kafka.server.config.ConfigSynonym.HOURS_TO_MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 @Timeout(value = 40)
@@ -80,9 +79,9 @@ public class ConfigurationControlManagerTest {
     public static final Map<String, List<ConfigSynonym>> SYNONYMS = new HashMap<>();
 
     static {
-        SYNONYMS.put("abc", Arrays.asList(new ConfigSynonym("foo.bar")));
-        SYNONYMS.put("def", Arrays.asList(new ConfigSynonym("baz")));
-        SYNONYMS.put("quuux", Arrays.asList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS)));
+        SYNONYMS.put("abc", Collections.singletonList(new ConfigSynonym("foo.bar")));
+        SYNONYMS.put("def", Collections.singletonList(new ConfigSynonym("baz")));
+        SYNONYMS.put("quuux", Collections.singletonList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS)));
     }
 
     static final KafkaConfigSchema SCHEMA = new KafkaConfigSchema(CONFIGS, SYNONYMS);
@@ -138,7 +137,7 @@ public void testReplay() throws Exception {
         assertEquals(toMap(entry("abc", "x,y,z"), entry("def", "blah")),
             manager.getConfigs(MYTOPIC));
         assertEquals("x,y,z", manager.getTopicConfig(MYTOPIC.name(), "abc"));
-        assertTrue(manager.getTopicConfig(MYTOPIC.name(), "none-exists") == null);
+        assertNull(manager.getTopicConfig(MYTOPIC.name(), "none-exists"));
     }
 
     @Test
@@ -382,14 +381,14 @@ expectedRecords1, toMap(entry(MYTOPIC, ApiError.NONE))),
         for (ApiMessageAndVersion message : expectedRecords1) {
             manager.replay((ConfigRecord) message.message());
         }
-        assertEquals(ControllerResult.atomicOf(asList(
-            new ApiMessageAndVersion(
-                new ConfigRecord()
-                    .setResourceType(TOPIC.id())
-                    .setResourceName("mytopic")
-                    .setName("abc")
-                    .setValue(null),
-                CONFIG_RECORD.highestSupportedVersion())),
+        assertEquals(ControllerResult.atomicOf(Collections.singletonList(
+            new ApiMessageAndVersion(
+                new ConfigRecord()
+                    .setResourceType(TOPIC.id())
+                    .setResourceName("mytopic")
+                    .setName("abc")
+                    .setValue(null),
+                CONFIG_RECORD.highestSupportedVersion())),
             toMap(entry(MYTOPIC, ApiError.NONE))),
             manager.legacyAlterConfigs(toMap(entry(MYTOPIC, toMap(entry("def", "901")))), true));
diff --git a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java
index b5f2239cd5a78..16f2809792687 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java
@@ -169,8 +169,8 @@ public void testUpdateFeaturesErrorCases() {
             setQuorumFeatures(features("foo", 1, 5, "bar", 0, 3)).
             setSnapshotRegistry(snapshotRegistry).
             setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber(
-                Arrays.asList(new SimpleImmutableEntry<>(5, Collections.singletonMap("bar", VersionRange.of(0, 3)))),
-                Arrays.asList())).
+                Collections.singletonList(new SimpleImmutableEntry<>(5, singletonMap("bar", VersionRange.of(0, 3)))),
+                emptyList())).
             build();
 
         assertEquals(ControllerResult.atomicOf(emptyList(),
@@ -389,15 +389,15 @@ public void testCreateFeatureLevelRecords() {
         FeatureControlManager manager = new FeatureControlManager.Builder().
             setQuorumFeatures(new QuorumFeatures(0, localSupportedFeatures, emptyList())).
             setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber(
-                Arrays.asList(new SimpleImmutableEntry<>(1, Collections.singletonMap("foo", VersionRange.of(0, 3)))),
-                Arrays.asList())).
+                Collections.singletonList(new SimpleImmutableEntry<>(1, singletonMap("foo", VersionRange.of(0, 3)))),
+                emptyList())).
             build();
         ControllerResult<Map<String, ApiError>> result = manager.updateFeatures(
             Collections.singletonMap("foo", (short) 1),
             Collections.singletonMap("foo", FeatureUpdate.UpgradeType.UPGRADE),
             false);
-        assertEquals(ControllerResult.atomicOf(Arrays.asList(new ApiMessageAndVersion(
-            new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 1), (short) 0)),
+        assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion(
+            new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 1), (short) 0)),
             Collections.singletonMap("foo", ApiError.NONE)), result);
         RecordTestUtils.replayAll(manager, result.records());
         assertEquals(Optional.of((short) 1), manager.finalizedFeatures(Long.MAX_VALUE).get("foo"));
@@ -406,7 +406,7 @@ public void testCreateFeatureLevelRecords() {
             Collections.singletonMap("foo", (short) 0),
             Collections.singletonMap("foo", FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE),
             false);
-        assertEquals(ControllerResult.atomicOf(Arrays.asList(new ApiMessageAndVersion(
+        assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion(
             new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 0), (short) 0)),
             Collections.singletonMap("foo", ApiError.NONE)), result2);
         RecordTestUtils.replayAll(manager, result2.records());
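ConfigurationControlManagerTest above also swaps assertTrue(x == null) for assertNull(x). The payoff is the failure message: assertTrue can only report that a boolean was false, while assertNull prints the offending value. A sketch, assuming JUnit 5 as used by these tests:

    import static org.junit.jupiter.api.Assertions.assertNull;

    import org.junit.jupiter.api.Test;

    public class AssertionStyleTest {
        @Test
        public void preferDedicatedAssertions() {
            String value = null;
            // On failure this reports 'expected: <null> but was: <...>'
            // instead of a bare 'expected: <true> but was: <false>'.
            assertNull(value);
        }
    }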
diff --git a/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java
index 2b5133f55ba0a..f2f774517e797 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/OffsetControlManagerTest.java
@@ -54,7 +54,7 @@ public void testInitialValues() {
         assertEquals(-1L, offsetControl.transactionStartOffset());
         assertEquals(-1L, offsetControl.nextWriteOffset());
         assertFalse(offsetControl.active());
-        assertEquals(Arrays.asList(-1L), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.singletonList(-1L), offsetControl.snapshotRegistry().epochsList());
     }
 
     @Test
@@ -64,7 +64,7 @@ public void testActivate() {
         assertEquals(1000L, offsetControl.nextWriteOffset());
         assertTrue(offsetControl.active());
         assertTrue(offsetControl.metrics().active());
-        assertEquals(Arrays.asList(-1L), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.singletonList(-1L), offsetControl.snapshotRegistry().epochsList());
     }
 
     @Test
@@ -122,7 +122,7 @@ public void testHandleCommitBatch() {
         OffsetControlManager offsetControl = new OffsetControlManager.Builder().build();
 
         offsetControl.handleCommitBatch(newFakeBatch(1000L, 200, 3000L));
-        assertEquals(Arrays.asList(1000L), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.singletonList(1000L), offsetControl.snapshotRegistry().epochsList());
         assertEquals(1000L, offsetControl.lastCommittedOffset());
         assertEquals(200, offsetControl.lastCommittedEpoch());
         assertEquals(1000L, offsetControl.lastStableOffset());
@@ -149,7 +149,7 @@ public void testHandleScheduleAtomicAppend() {
         offsetControl.handleCommitBatch(newFakeBatch(2000L, 200, 3000L));
         assertEquals(2000L, offsetControl.lastStableOffset());
         assertEquals(2000L, offsetControl.lastCommittedOffset());
-        assertEquals(Arrays.asList(2000L), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.singletonList(2000L), offsetControl.snapshotRegistry().epochsList());
     }
 
     @Test
@@ -163,14 +163,14 @@ public void testHandleLoadSnapshot() {
         assertEquals(Arrays.asList("snapshot[-1]", "reset"), snapshotRegistry.operations());
         assertEquals(new OffsetAndEpoch(4000L, 300), offsetControl.currentSnapshotId());
         assertEquals("00000000000000004000-0000000300", offsetControl.currentSnapshotName());
-        assertEquals(Arrays.asList(), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.emptyList(), offsetControl.snapshotRegistry().epochsList());
 
         offsetControl.endLoadSnapshot(3456L);
         assertEquals(Arrays.asList("snapshot[-1]", "reset", "snapshot[4000]"),
             snapshotRegistry.operations());
         assertNull(offsetControl.currentSnapshotId());
         assertNull(offsetControl.currentSnapshotName());
-        assertEquals(Arrays.asList(4000L), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.singletonList(4000L), offsetControl.snapshotRegistry().epochsList());
         assertEquals(4000L, offsetControl.lastCommittedOffset());
         assertEquals(300, offsetControl.lastCommittedEpoch());
         assertEquals(4000L, offsetControl.lastStableOffset());
@@ -236,7 +236,7 @@ public void testReplayTransaction(boolean aborted) {
         assertEquals(1550L, offsetControl.lastCommittedOffset());
         assertEquals(100, offsetControl.lastCommittedEpoch());
         assertEquals(1499L, offsetControl.lastStableOffset());
-        assertEquals(Arrays.asList(1499L), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.singletonList(1499L), offsetControl.snapshotRegistry().epochsList());
 
         if (aborted) {
             offsetControl.replay(new AbortTransactionRecord(), 1600L);
@@ -252,7 +252,7 @@ public void testReplayTransaction(boolean aborted) {
 
         offsetControl.handleCommitBatch(newFakeBatch(1650, 100, 2100L));
         assertEquals(1650, offsetControl.lastStableOffset());
-        assertEquals(Arrays.asList(1650L), offsetControl.snapshotRegistry().epochsList());
+        assertEquals(Collections.singletonList(1650L), offsetControl.snapshotRegistry().epochsList());
     }
 
     @Test
diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java
index 044402d50872e..2bbcf01611bbf 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionChangeBuilderTest.java
@@ -82,13 +82,13 @@ public void testChangeRecordIsNoOp() {
         assertFalse(changeRecordIsNoOp(new PartitionChangeRecord().
             setIsr(Arrays.asList(1, 2, 3))));
         assertFalse(changeRecordIsNoOp(new PartitionChangeRecord().
-            setRemovingReplicas(Arrays.asList(1))));
+            setRemovingReplicas(Collections.singletonList(1))));
         assertFalse(changeRecordIsNoOp(new PartitionChangeRecord().
-            setAddingReplicas(Arrays.asList(4))));
+            setAddingReplicas(Collections.singletonList(4))));
         assertFalse(changeRecordIsNoOp(new PartitionChangeRecord().
-            setEligibleLeaderReplicas(Arrays.asList(5))));
+            setEligibleLeaderReplicas(Collections.singletonList(5))));
         assertFalse(changeRecordIsNoOp(new PartitionChangeRecord().
-            setLastKnownElr(Arrays.asList(6))));
+            setLastKnownElr(Collections.singletonList(6))));
         assertFalse(
             changeRecordIsNoOp(
                 new PartitionChangeRecord()
@@ -274,12 +274,12 @@ public void testElectLeader(short version) {
         assertElectLeaderEquals(createFooBuilder(version).setElection(Election.UNCLEAN)
            .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(1, 3))), 1, false);
         assertElectLeaderEquals(createFooBuilder(version)
-            .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(3))), NO_LEADER, false);
+            .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(3))), NO_LEADER, false);
         assertElectLeaderEquals(createFooBuilder(version).setElection(Election.UNCLEAN).
-            setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(3))), 2, true);
+            setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(3))), 2, true);
         assertElectLeaderEquals(
             createFooBuilder(version).setElection(Election.UNCLEAN)
-                .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(4))).setTargetReplicas(Arrays.asList(2, 1, 3, 4)),
+                .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(4))).setTargetReplicas(Arrays.asList(2, 1, 3, 4)),
             4,
             false
         );
@@ -424,7 +424,7 @@ public void testNoLeaderEpochBumpOnEmptyTargetIsr(String metadataVersionString)
                 2).
             setEligibleLeaderReplicasEnabled(metadataVersion.isElrSupported()).
             setDefaultDirProvider(DEFAULT_DIR_PROVIDER).
-            setTargetReplicas(Arrays.asList());
+            setTargetReplicas(Collections.emptyList());
         PartitionChangeRecord record = new PartitionChangeRecord();
         builder.triggerLeaderEpochBumpForIsrShrinkIfNeeded(record);
         assertEquals(NO_LEADER_CHANGE, record.leader());
@@ -593,7 +593,7 @@ public void testUncleanLeaderElection(short version) {
             new PartitionChangeRecord()
                 .setTopicId(FOO_ID)
                 .setPartitionId(0)
-                .setIsr(Arrays.asList(2))
+                .setIsr(Collections.singletonList(2))
                 .setLeader(2)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value()),
             version
@@ -601,13 +601,13 @@ public void testUncleanLeaderElection(short version) {
         assertEquals(
             Optional.of(expectedRecord),
             createFooBuilder(version).setElection(Election.UNCLEAN)
-                .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(3))).build()
+                .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(3))).build()
         );
 
         PartitionChangeRecord record = new PartitionChangeRecord()
             .setTopicId(OFFLINE_ID)
             .setPartitionId(0)
-            .setIsr(Arrays.asList(1))
+            .setIsr(Collections.singletonList(1))
             .setLeader(1)
             .setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value());
 
@@ -626,7 +626,7 @@ public void testUncleanLeaderElection(short version) {
         assertEquals(
             Optional.of(expectedRecord),
             createOfflineBuilder(version).setElection(Election.UNCLEAN)
-                .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Arrays.asList(2))).build()
+                .setTargetIsrWithBrokerStates(AlterPartitionRequest.newIsrToSimpleNewIsrWithBrokerEpochs(Collections.singletonList(2))).build()
         );
     }
@@ -1017,7 +1017,7 @@ public void testEligibleLeaderReplicas_RemoveUncleanShutdownReplicasFromElr(shor
             .setDefaultDirProvider(DEFAULT_DIR_PROVIDER)
             .setUseLastKnownLeaderInBalancedRecovery(false);
 
-        builder.setUncleanShutdownReplicas(Arrays.asList(3));
+        builder.setUncleanShutdownReplicas(Collections.singletonList(3));
 
         PartitionChangeRecord record = new PartitionChangeRecord()
             .setTopicId(topicId)
@@ -1025,8 +1025,8 @@ public void testEligibleLeaderReplicas_RemoveUncleanShutdownReplicasFromElr(shor
             .setLeader(-2)
             .setLeaderRecoveryState(LeaderRecoveryState.NO_CHANGE);
         if (version >= 2) {
-            record.setEligibleLeaderReplicas(Arrays.asList(2))
-                .setLastKnownElr(Arrays.asList(3));
+            record.setEligibleLeaderReplicas(Collections.singletonList(2))
+                .setLastKnownElr(Collections.singletonList(3));
         } else {
             record.setEligibleLeaderReplicas(Collections.emptyList());
         }
@@ -1146,8 +1146,8 @@ public void testEligibleLeaderReplicas_ElrCanBeElected(boolean lastKnownLeaderEn
             new PartitionChangeRecord()
                 .setTopicId(topicId)
                 .setPartitionId(0)
-                .setIsr(Arrays.asList(3))
-                .setEligibleLeaderReplicas(Arrays.asList(1))
+                .setIsr(Collections.singletonList(3))
+                .setEligibleLeaderReplicas(Collections.singletonList(1))
                 .setLeader(3)
                 .setLeaderRecoveryState(LeaderRecoveryState.NO_CHANGE),
             version
@@ -1200,7 +1200,7 @@ public void testEligibleLeaderReplicas_IsrCanShrinkToZero(boolean lastKnownLeade
             .setEligibleLeaderReplicas(Arrays.asList(1, 2, 3, 4));
 
         if (lastKnownLeaderEnabled) {
-            record.setLastKnownElr(Arrays.asList(1));
+            record.setLastKnownElr(Collections.singletonList(1));
         }
 
         ApiMessageAndVersion expectedRecord = new ApiMessageAndVersion(record, version);
@@ -1213,7 +1213,7 @@ public void testEligibleLeaderReplicas_IsrCanShrinkToZero(boolean lastKnownLeade
                 metadataVersionForPartitionChangeRecordVersion(version), 3)
             .setElection(Election.PREFERRED)
             .setEligibleLeaderReplicasEnabled(true)
-            .setUncleanShutdownReplicas(Arrays.asList(2))
+            .setUncleanShutdownReplicas(Collections.singletonList(2))
            .setDefaultDirProvider(DEFAULT_DIR_PROVIDER)
             .setUseLastKnownLeaderInBalancedRecovery(lastKnownLeaderEnabled);
         PartitionChangeRecord changeRecord = (PartitionChangeRecord) builder.build().get().message();
@@ -1253,7 +1253,7 @@ public void testEligibleLeaderReplicas_ElectLastKnownLeader() {
             new PartitionChangeRecord()
                 .setTopicId(topicId)
                 .setPartitionId(0)
-                .setIsr(Arrays.asList(1))
+                .setIsr(Collections.singletonList(1))
                 .setLeader(1)
                 .setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value())
                 .setLastKnownElr(Collections.emptyList()),
diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java
index 17be98d47f0b1..e35f468132983 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java
@@ -202,7 +202,7 @@ public void testDoesNotCompleteReassignmentIfIsrDoesNotHaveAllTargetReplicas() {
             partitionAssignment(Arrays.asList(0, 1, 2)), partitionAssignment(Arrays.asList(0, 1, 3)));
         assertTrue(replicas.isReassignmentInProgress());
         Optional reassignmentOptional =
-            replicas.maybeCompleteReassignment(Arrays.asList(3));
+            replicas.maybeCompleteReassignment(Collections.singletonList(3));
         assertFalse(reassignmentOptional.isPresent());
     }
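Switching a file to Collections.singletonList or Collections.emptyList usually also touches its import block, as the PartitionReassignmentRevertTest hunk below does by adding java.util.Collections. For empty lists the swap additionally avoids a small per-call allocation; a sketch:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class EmptyLists {
        public static void main(String[] args) {
            List<Integer> a = Arrays.asList();          // allocates a fresh wrapper each call
            List<Integer> b = Collections.emptyList();  // shared immutable singleton, no allocation
            System.out.println(a.equals(b));            // true: both are empty lists
        }
    }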
diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java
index 05148813e8108..ae1251e1c2afa 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentRevertTest.java
@@ -18,6 +18,7 @@
 package org.apache.kafka.controller;
 
 import java.util.Arrays;
+import java.util.Collections;
 
 import org.apache.kafka.common.Uuid;
 import org.apache.kafka.metadata.LeaderRecoveryState;
@@ -78,7 +79,7 @@ public void testSomeAdding() {
             setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build();
         PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration);
         assertEquals(Arrays.asList(3, 2, 1), revert.replicas());
-        assertEquals(Arrays.asList(2), revert.isr());
+        assertEquals(Collections.singletonList(2), revert.isr());
         assertFalse(revert.unclean());
     }
 
@@ -96,7 +97,7 @@ public void testSomeRemovingAndAdding() {
             setRemovingReplicas(new int[]{2}).setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build();
         PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration);
         assertEquals(Arrays.asList(3, 2, 1), revert.replicas());
-        assertEquals(Arrays.asList(2), revert.isr());
+        assertEquals(Collections.singletonList(2), revert.isr());
         assertFalse(revert.unclean());
     }
 
@@ -114,7 +115,7 @@ public void testIsrSpecialCase() {
             setRemovingReplicas(new int[]{2}).setAddingReplicas(new int[]{4, 5}).setLeader(3).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(100).setPartitionEpoch(200).build();
         PartitionReassignmentRevert revert = new PartitionReassignmentRevert(registration);
         assertEquals(Arrays.asList(3, 2, 1), revert.replicas());
-        assertEquals(Arrays.asList(3), revert.isr());
+        assertEquals(Collections.singletonList(3), revert.isr());
         assertTrue(revert.unclean());
     }
 }
diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java
index 9fbb8ee855c36..bb3f0bbd57b8b 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java
@@ -17,7 +17,6 @@
 
 package org.apache.kafka.controller;
 
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -100,11 +99,11 @@ static Map registerBrokersAndUnfence(
                     Uuid.fromString("TESTBROKER" + Integer.toString(100000 + brokerId).substring(1) + "DIRAAAA")
                 ))
                 .setListeners(new ListenerCollection(
-                    Arrays.asList(
-                        new Listener()
-                            .setName("PLAINTEXT")
-                            .setHost("localhost")
-                            .setPort(9092 + brokerId)
+                    Collections.singletonList(
+                        new Listener()
+                            .setName("PLAINTEXT")
+                            .setHost("localhost")
+                            .setPort(9092 + brokerId)
                     ).iterator()
                 )
             )
diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java
index 1b18c9648de4f..96843f63207e9 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java
@@ -270,7 +270,7 @@ private void testDelayedConfigurationOperations(
     @Test
     public void testFenceMultipleBrokers() throws Throwable {
         List allBrokers = Arrays.asList(1, 2, 3, 4, 5);
-        List brokersToKeepUnfenced = Arrays.asList(1);
+        List brokersToKeepUnfenced = singletonList(1);
         List brokersToFence = Arrays.asList(2, 3, 4, 5);
         short replicationFactor = (short) allBrokers.size();
         short numberOfPartitions = (short) allBrokers.size();
@@ -484,7 +484,7 @@ public void testUncleanShutdownBroker() throws Throwable {
             assertArrayEquals(lastKnownElr, partition.lastKnownElr, partition.toString());
 
             // Unfence the last one in the ELR, it should be elected.
-            sendBrokerHeartbeatToUnfenceBrokers(active, Arrays.asList(brokerToBeTheLeader), brokerEpochs);
+            sendBrokerHeartbeatToUnfenceBrokers(active, singletonList(brokerToBeTheLeader), brokerEpochs);
             TestUtils.waitForCondition(() -> {
                     return active.clusterControl().isUnfenced(brokerToBeTheLeader);
                 }, sessionTimeoutMillis * 3,
@@ -798,21 +798,21 @@ public void testSnapshotSaveAndLoad() throws Throwable {
                     setIncarnationId(new Uuid(3465346L, i)).
                     setZkMigrationReady(false).
                     setListeners(new ControllerRegistrationRequestData.ListenerCollection(
-                        Arrays.asList(
-                            new ControllerRegistrationRequestData.Listener().
-                                setName("CONTROLLER").
-                                setHost("localhost").
-                                setPort(8000 + i).
-                                setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
+                        singletonList(
+                            new ControllerRegistrationRequestData.Listener().
+                                setName("CONTROLLER").
+                                setHost("localhost").
+                                setPort(8000 + i).
+                                setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
                         ).iterator()
                     )).
                     setFeatures(new ControllerRegistrationRequestData.FeatureCollection(
-                        Arrays.asList(
-                            new ControllerRegistrationRequestData.Feature().
-                                setName(MetadataVersion.FEATURE_NAME).
-                                setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()).
-                                setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel())
-                        ).iterator()
+                        singletonList(
+                            new ControllerRegistrationRequestData.Feature().
+                                setName(MetadataVersion.FEATURE_NAME).
+                                setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()).
+                                setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel())
+                        ).iterator()
                     ))).get();
         }
         for (int i = 0; i < numBrokers; i++) {
@@ -823,9 +823,9 @@ public void testSnapshotSaveAndLoad() throws Throwable {
                     setClusterId(active.clusterId()).
                     setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_7_IV0)).
                     setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + i)).
-                    setListeners(new ListenerCollection(Arrays.asList(new Listener().
-                        setName("PLAINTEXT").setHost("localhost").
-                        setPort(9092 + i)).iterator()))).get();
+                    setListeners(new ListenerCollection(singletonList(new Listener().
+                        setName("PLAINTEXT").setHost("localhost").
+                        setPort(9092 + i)).iterator()))).get();
             brokerEpochs.put(i, reply.epoch());
         }
         for (int i = 0; i < numBrokers - 1; i++) {
@@ -872,68 +872,68 @@ private List generateTestRecords(Uuid fooId, Map generateTestRecords(Uuid fooId, Map> collisionMap = new HashMap<>();
-        collisionMap.put("foo_bar", new TreeSet<>(Arrays.asList("foo_bar")));
+        collisionMap.put("foo_bar", new TreeSet<>(singletonList("foo_bar")));
         collisionMap.put("woo_bar_foo", new TreeSet<>(Arrays.asList("woo.bar.foo", "woo_bar.foo")));
         ReplicationControlManager.validateNewTopicNames(topicErrors, topics, collisionMap);
         Map expectedTopicErrors = new HashMap<>();
@@ -1157,11 +1157,11 @@ public void testAlterPartitionHandleUnknownTopicIdOrName(short version) {
         AlterPartitionRequestData request = new AlterPartitionRequestData()
             .setBrokerId(0)
             .setBrokerEpoch(100)
-            .setTopics(asList(new AlterPartitionRequestData.TopicData()
-                .setTopicName(version <= 1 ? topicName : "")
-                .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID)
-                .setPartitions(asList(new PartitionData()
-                    .setPartitionIndex(0)))));
+            .setTopics(singletonList(new TopicData()
+                .setTopicName(version <= 1 ? topicName : "")
+                .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID)
+                .setPartitions(singletonList(new PartitionData()
+                    .setPartitionIndex(0)))));
 
         ControllerRequestContext requestContext =
             anonymousContextFor(ApiKeys.ALTER_PARTITION, version);
@@ -1171,12 +1171,12 @@ public void testAlterPartitionHandleUnknownTopicIdOrName(short version) {
         Errors expectedError = version > 1 ? UNKNOWN_TOPIC_ID : UNKNOWN_TOPIC_OR_PARTITION;
         AlterPartitionResponseData expectedResponse = new AlterPartitionResponseData()
-            .setTopics(asList(new AlterPartitionResponseData.TopicData()
-                .setTopicName(version <= 1 ? topicName : "")
-                .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID)
-                .setPartitions(asList(new AlterPartitionResponseData.PartitionData()
-                    .setPartitionIndex(0)
-                    .setErrorCode(expectedError.code())))));
+            .setTopics(singletonList(new AlterPartitionResponseData.TopicData()
+                .setTopicName(version <= 1 ? topicName : "")
+                .setTopicId(version > 1 ? topicId : Uuid.ZERO_UUID)
+                .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData()
+                    .setPartitionIndex(0)
+                    .setErrorCode(expectedError.code())))));
 
         assertEquals(expectedResponse, result.response());
     }
@@ -1509,17 +1509,17 @@ public void testCreatePartitions() {
         ctx.replay(createPartitionsResult.records());
         List topics2 = new ArrayList<>();
         topics2.add(new CreatePartitionsTopic().
-            setName("foo").setCount(6).setAssignments(asList(
-                new CreatePartitionsAssignment().setBrokerIds(asList(1, 3)))));
+            setName("foo").setCount(6).setAssignments(singletonList(
+                new CreatePartitionsAssignment().setBrokerIds(asList(1, 3)))));
         topics2.add(new CreatePartitionsTopic().
-            setName("bar").setCount(5).setAssignments(asList(
-                new CreatePartitionsAssignment().setBrokerIds(asList(1)))));
+            setName("bar").setCount(5).setAssignments(singletonList(
+                new CreatePartitionsAssignment().setBrokerIds(singletonList(1)))));
         topics2.add(new CreatePartitionsTopic().
-            setName("quux").setCount(4).setAssignments(asList(
-                new CreatePartitionsAssignment().setBrokerIds(asList(1, 0)))));
+            setName("quux").setCount(4).setAssignments(singletonList(
+                new CreatePartitionsAssignment().setBrokerIds(asList(1, 0)))));
         topics2.add(new CreatePartitionsTopic().
-            setName("foo2").setCount(3).setAssignments(asList(
-                new CreatePartitionsAssignment().setBrokerIds(asList(2, 0)))));
+            setName("foo2").setCount(3).setAssignments(singletonList(
+                new CreatePartitionsAssignment().setBrokerIds(asList(2, 0)))));
         ControllerResult<List<CreatePartitionsTopicResult>> createPartitionsResult2 =
             replicationControl.createPartitions(requestContext, topics2);
         assertEquals(asList(new CreatePartitionsTopicResult().
@@ -1579,8 +1579,8 @@ public void testCreatePartitionsWithMutationQuotaExceeded() {
         // now test the explicit assignment case
         List topics2 = new ArrayList<>();
         topics2.add(new CreatePartitionsTopic().
-            setName("foo").setCount(4).setAssignments(asList(
-                new CreatePartitionsAssignment().setBrokerIds(asList(1, 0)))));
+            setName("foo").setCount(4).setAssignments(singletonList(
+                new CreatePartitionsAssignment().setBrokerIds(asList(1, 0)))));
         ControllerResult<List<CreatePartitionsTopicResult>> createPartitionsResult2 =
             replicationControl.createPartitions(createPartitionsRequestContext, topics2);
         assertEquals(expectedThrottled, createPartitionsResult2.response());
@@ -1600,7 +1600,7 @@ public void testCreatePartitionsFailsWhenAllBrokersAreFencedOrInControlledShutdo
         ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.CREATE_TOPICS);
         ControllerResult createTopicResult = replicationControl.
-            createTopics(requestContext, request, new HashSet<>(Arrays.asList("foo")));
+            createTopics(requestContext, request, new HashSet<>(singletonList("foo")));
         ctx.replay(createTopicResult.records());
 
         ctx.registerBrokers(0, 1);
@@ -1614,11 +1614,11 @@ public void testCreatePartitionsFailsWhenAllBrokersAreFencedOrInControlledShutdo
             replicationControl.createPartitions(requestContext, topics);
 
         assertEquals(
-            asList(new CreatePartitionsTopicResult().
-                setName("foo").
-                setErrorCode(INVALID_REPLICATION_FACTOR.code()).
-                setErrorMessage("Unable to replicate the partition 2 time(s): All " +
-                    "brokers are currently fenced or in controlled shutdown.")),
+            singletonList(new CreatePartitionsTopicResult().
+                setName("foo").
+                setErrorCode(INVALID_REPLICATION_FACTOR.code()).
+                setErrorMessage("Unable to replicate the partition 2 time(s): All " +
+                    "brokers are currently fenced or in controlled shutdown.")),
             createPartitionsResult.response());
     }
+ setName("foo").setCount(2).setAssignments(null)); ControllerResult> createPartitionsResult = replicationControl.createPartitions(requestContext, topics); @@ -1670,9 +1670,9 @@ public void testCreatePartitionsISRInvariants() { public void testValidateGoodManualPartitionAssignments() { ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build(); ctx.registerBrokers(1, 2, 3); - ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1)), + ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(singletonList(1)), OptionalInt.of(1)); - ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1)), + ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(singletonList(1)), OptionalInt.empty()); ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1, 2, 3)), OptionalInt.of(3)); @@ -1686,7 +1686,7 @@ public void testValidateBadManualPartitionAssignments() { ctx.registerBrokers(1, 2); assertEquals("The manual partition assignment includes an empty replica list.", assertThrows(InvalidReplicaAssignmentException.class, () -> - ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList()), + ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(Collections.emptyList()), OptionalInt.empty())).getMessage()); assertEquals("The manual partition assignment includes broker 3, but no such " + "broker is registered.", assertThrows(InvalidReplicaAssignmentException.class, () -> @@ -1748,19 +1748,19 @@ public void testReassignPartitions(short version) { ctx.replay(alterResult.records()); ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName("foo").setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(1). - setRemovingReplicas(asList(3)). - setAddingReplicas(asList(0)). - setReplicas(asList(0, 2, 1, 3)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName("foo").setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(1). + setRemovingReplicas(singletonList(3)). + setAddingReplicas(singletonList(0)). + setReplicas(asList(0, 2, 1, 3)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); - assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(asList( + assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(singletonList( new ListPartitionReassignmentsTopics().setName("bar"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); - assertEquals(currentReassigning, replication.listPartitionReassignments(asList( - new ListPartitionReassignmentsTopics().setName("foo"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + assertEquals(currentReassigning, replication.listPartitionReassignments(singletonList( + new ListPartitionReassignmentsTopics().setName("foo"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); ControllerResult cancelResult = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(asList( @@ -1771,9 +1771,9 @@ public void testReassignPartitions(short version) { setReplicas(null), new ReassignablePartition().setPartitionIndex(2). 
setReplicas(null))), - new ReassignableTopic().setName("bar").setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null)))))); + new ReassignableTopic().setName("bar").setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null)))))); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(fooId). setPartitionId(1). @@ -1795,10 +1795,10 @@ public void testReassignPartitions(short version) { new ReassignablePartitionResponse().setPartitionIndex(2). setErrorCode(UNKNOWN_TOPIC_OR_PARTITION.code()). setErrorMessage("Unable to find partition foo:2."))), - new ReassignableTopicResponse().setName("bar").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()). - setErrorMessage(null)))))), + new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()). + setErrorMessage(null)))))), cancelResult); log.info("running final alterPartition..."); ControllerRequestContext requestContext = @@ -1806,26 +1806,26 @@ public void testReassignPartitions(short version) { AlterPartitionRequestData alterPartitionRequestData = new AlterPartitionRequestData(). setBrokerId(3). setBrokerEpoch(103). - setTopics(asList(new TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(asList(new PartitionData(). - setPartitionIndex(1). - setPartitionEpoch(1). - setLeaderEpoch(0). - setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); + setTopics(singletonList(new TopicData(). + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(1). + setPartitionEpoch(1). + setLeaderEpoch(0). + setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); ControllerResult alterPartitionResult = replication.alterPartition( requestContext, new AlterPartitionRequest.Builder(alterPartitionRequestData, version > 1).build(version).data()); Errors expectedError = version > 1 ? NEW_LEADER_ELECTED : FENCED_LEADER_EPOCH; - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(1). - setErrorCode(expectedError.code()))))), + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData(). + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(1). + setErrorCode(expectedError.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -1867,14 +1867,14 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { AlterPartitionRequestData alterIsrRequest = new AlterPartitionRequestData() .setBrokerId(1) .setBrokerEpoch(101) - .setTopics(asList(new TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? 
fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new PartitionData() - .setPartitionIndex(0) - .setPartitionEpoch(1) - .setLeaderEpoch(0) - .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + .setTopics(singletonList(new TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0) + .setPartitionEpoch(1) + .setLeaderEpoch(0) + .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -1885,12 +1885,12 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; assertEquals( new AlterPartitionResponseData() - .setTopics(asList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))), + .setTopics(singletonList(new AlterPartitionResponseData.TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))), alterPartitionResult.response()); fenceRecords = new ArrayList<>(); @@ -1901,16 +1901,16 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { assertEquals( new AlterPartitionResponseData() - .setTopics(asList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setLeaderId(1) - .setLeaderEpoch(0) - .setIsr(asList(1, 2, 3, 4)) - .setPartitionEpoch(2) - .setErrorCode(NONE.code()))))), + .setTopics(singletonList(new AlterPartitionResponseData.TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setLeaderId(1) + .setLeaderEpoch(0) + .setIsr(asList(1, 2, 3, 4)) + .setPartitionEpoch(2) + .setErrorCode(NONE.code()))))), alterPartitionResult.response()); } @@ -1931,14 +1931,14 @@ public void testAlterPartitionShouldRejectBrokersWithStaleEpoch(short version) { AlterPartitionRequestData alterIsrRequest = new AlterPartitionRequestData(). setBrokerId(1). setBrokerEpoch(101). - setTopics(asList(new TopicData(). - setTopicName(version <= 1 ? "foo" : ""). - setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). - setPartitions(asList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(1). - setLeaderEpoch(0). - setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + setTopics(singletonList(new TopicData(). + setTopicName(version <= 1 ? "foo" : ""). + setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(1). + setLeaderEpoch(0). + setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); // The broker 4 has failed silently and now registers again. 
long newEpoch = defaultBrokerEpoch(4) + 1000; @@ -1971,12 +1971,12 @@ public void testAlterPartitionShouldRejectBrokersWithStaleEpoch(short version) { if (version >= 3) { assertEquals( new AlterPartitionResponseData(). - setTopics(asList(new AlterPartitionResponseData.TopicData(). - setTopicName(""). - setTopicId(fooId). - setPartitions(asList(new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setErrorCode(INELIGIBLE_REPLICA.code()))))), + setTopics(singletonList(new AlterPartitionResponseData.TopicData(). + setTopicName(""). + setTopicId(fooId). + setPartitions(singletonList(new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setErrorCode(INELIGIBLE_REPLICA.code()))))), alterPartitionResult.response()); } else { assertEquals(NONE.code(), alterPartitionResult.response().errorCode()); @@ -2017,14 +2017,14 @@ public void testAlterPartitionShouldRejectShuttingDownBrokers(short version) { AlterPartitionRequestData alterIsrRequest = new AlterPartitionRequestData() .setBrokerId(1) .setBrokerEpoch(101) - .setTopics(asList(new TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new PartitionData() - .setPartitionIndex(0) - .setPartitionEpoch(0) - .setLeaderEpoch(0) - .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); + .setTopics(singletonList(new TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new PartitionData() + .setPartitionIndex(0) + .setPartitionEpoch(0) + .setLeaderEpoch(0) + .setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3, 4)))))); ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.ALTER_PARTITION, version); @@ -2035,12 +2035,12 @@ public void testAlterPartitionShouldRejectShuttingDownBrokers(short version) { Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; assertEquals( new AlterPartitionResponseData() - .setTopics(asList(new AlterPartitionResponseData.TopicData() - .setTopicName(version <= 1 ? "foo" : "") - .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) - .setPartitions(asList(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(0) - .setErrorCode(expectedError.code()))))), + .setTopics(singletonList(new AlterPartitionResponseData.TopicData() + .setTopicName(version <= 1 ? "foo" : "") + .setTopicId(version > 1 ? fooId : Uuid.ZERO_UUID) + .setPartitions(singletonList(new AlterPartitionResponseData.PartitionData() + .setPartitionIndex(0) + .setErrorCode(expectedError.code()))))), alterPartitionResult.response()); } @@ -2081,10 +2081,10 @@ public void testCancelReassignPartitions() { new ReassignablePartition().setPartitionIndex(2). setReplicas(asList(5, 6, 7)), new ReassignablePartition().setPartitionIndex(3). - setReplicas(asList()))), - new ReassignableTopic().setName("bar").setPartitions(asList( + setReplicas(Collections.emptyList()))), + new ReassignableTopic().setName("bar").setPartitions(singletonList( new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(1, 2, 3, 4, 0))))))); + setReplicas(asList(1, 2, 3, 4, 0))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). setErrorMessage(null).setResponses(asList( new ReassignableTopicResponse().setName("foo").setPartitions(asList( @@ -2100,9 +2100,9 @@ public void testCancelReassignPartitions() { setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()). 
setErrorMessage("The manual partition assignment includes an empty " + "replica list."))), - new ReassignableTopicResponse().setName("bar").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), + new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResult.response()); ctx.replay(alterResult.records()); assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 4}).setIsr(new int[] {1, 2, 4}). @@ -2131,44 +2131,44 @@ public void testCancelReassignPartitions() { setAddingReplicas(new int[] {0, 1}).setLeader(4).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(2).build(), replication.getPartition(barId, 0)); ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName("bar").setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(Collections.emptyList()). - setAddingReplicas(asList(0, 1)). - setReplicas(asList(1, 2, 3, 4, 0)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName("bar").setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(Collections.emptyList()). + setAddingReplicas(asList(0, 1)). + setReplicas(asList(1, 2, 3, 4, 0)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); - assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(asList( - new ListPartitionReassignmentsTopics().setName("foo"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); - assertEquals(currentReassigning, replication.listPartitionReassignments(asList( - new ListPartitionReassignmentsTopics().setName("bar"). - setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(singletonList( + new ListPartitionReassignmentsTopics().setName("foo"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); + assertEquals(currentReassigning, replication.listPartitionReassignments(singletonList( + new ListPartitionReassignmentsTopics().setName("bar"). + setPartitionIndexes(asList(0, 1, 2))), Long.MAX_VALUE)); ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(4).setBrokerEpoch(104). - setTopics(asList(new TopicData().setTopicId(barId).setPartitions(asList( - new PartitionData().setPartitionIndex(0).setPartitionEpoch(2). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(4, 1, 2, 0))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(barId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(4). - setLeaderEpoch(0). - setIsr(asList(4, 1, 2, 0)). - setPartitionEpoch(3). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(barId).setPartitions(singletonList( + new PartitionData().setPartitionIndex(0).setPartitionEpoch(2). 
+ setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(4, 1, 2, 0))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(barId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(4). + setLeaderEpoch(0). + setIsr(asList(4, 1, 2, 0)). + setPartitionEpoch(3). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ControllerResult cancelResult = replication.alterPartitionReassignments( new AlterPartitionReassignmentsRequestData().setTopics(asList( - new ReassignableTopic().setName("foo").setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null))), - new ReassignableTopic().setName("bar").setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(null)))))); + new ReassignableTopic().setName("foo").setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null))), + new ReassignableTopic().setName("bar").setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(null)))))); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(barId). setPartitionId(0). @@ -2182,12 +2182,12 @@ public void testCancelReassignPartitions() { setRemovingReplicas(null). setAddingReplicas(Collections.emptyList()), MetadataVersion.latestTesting().partitionChangeRecordVersion())), new AlterPartitionReassignmentsResponseData().setErrorMessage(null).setResponses(asList( - new ReassignableTopicResponse().setName("foo").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()).setErrorMessage(null))), - new ReassignableTopicResponse().setName("bar").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null)))))), + new ReassignableTopicResponse().setName("foo").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorCode(NO_REASSIGNMENT_IN_PROGRESS.code()).setErrorMessage(null))), + new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null)))))), cancelResult); ctx.replay(cancelResult.records()); assertEquals(NONE_REASSIGNING, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2461,30 +2461,30 @@ public void testElectPreferredLeaders() { ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). - setTopics(asList(new AlterPartitionRequestData.TopicData().setTopicId(fooId). - setPartitions(asList( - new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(0).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3)), - new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(2).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(1, 2, 3)). - setPartitionEpoch(1). 
- setErrorCode(NONE.code()), - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(2). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(0, 2, 1)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(fooId). + setPartitions(asList( + new PartitionData(). + setPartitionIndex(0).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3)), + new PartitionData(). + setPartitionIndex(2).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(1, 2, 3)). + setPartitionEpoch(1). + setErrorCode(NONE.code()), + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(2). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(0, 2, 1)). + setPartitionEpoch(1). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ElectLeadersResponseData expectedResponse2 = buildElectLeadersResponse(NONE, false, Utils.mkMap( @@ -2547,19 +2547,19 @@ public void testBalancePartitionLeaders() { ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). - setTopics(asList(new AlterPartitionRequestData.TopicData().setTopicId(fooId). - setPartitions(asList(new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(0).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(1, 2, 3)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(fooId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(1, 2, 3))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(1, 2, 3)). + setPartitionEpoch(1). 
+ setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2570,7 +2570,7 @@ public void testBalancePartitionLeaders() { .setPartitionId(0) .setTopicId(fooId) .setLeader(1); - assertEquals(asList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); + assertEquals(singletonList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); assertTrue(replication.arePartitionLeadersImbalanced()); assertFalse(balanceResult.response()); @@ -2579,19 +2579,19 @@ public void testBalancePartitionLeaders() { alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequestData().setBrokerId(2).setBrokerEpoch(102). - setTopics(asList(new AlterPartitionRequestData.TopicData().setTopicId(fooId). - setPartitions(asList(new AlterPartitionRequestData.PartitionData(). - setPartitionIndex(2).setPartitionEpoch(0). - setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(2). - setLeaderId(2). - setLeaderEpoch(0). - setIsr(asList(0, 2, 1)). - setPartitionEpoch(1). - setErrorCode(NONE.code()))))), + setTopics(singletonList(new TopicData().setTopicId(fooId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(2).setPartitionEpoch(0). + setLeaderEpoch(0).setNewIsrWithEpochs(isrWithDefaultEpoch(0, 2, 1))))))); + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData().setTopicId(fooId).setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(2). + setLeaderId(2). + setLeaderEpoch(0). + setIsr(asList(0, 2, 1)). + setPartitionEpoch(1). + setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2602,7 +2602,7 @@ public void testBalancePartitionLeaders() { .setPartitionId(2) .setTopicId(fooId) .setLeader(0); - assertEquals(asList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); + assertEquals(singletonList(new ApiMessageAndVersion(expectedChangeRecord, MetadataVersion.latestTesting().partitionChangeRecordVersion())), balanceResult.records()); assertFalse(replication.arePartitionLeadersImbalanced()); assertFalse(balanceResult.response()); } @@ -2664,7 +2664,7 @@ public void testKRaftClusterDescriber() { ctx.registerBrokersWithDirs( 0, Collections.emptyList(), 1, Collections.emptyList(), - 2, asList(Uuid.fromString("ozwqsVMFSNiYQUPSJA3j0w")), + 2, singletonList(Uuid.fromString("ozwqsVMFSNiYQUPSJA3j0w")), 3, asList(Uuid.fromString("SSDgCZ4BTyec5QojGT65qg"), Uuid.fromString("K8KwMrviRcOUvgI8FPOJWg")), 4, Collections.emptyList() ); @@ -2773,25 +2773,25 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd // Reassign to [2, 3] ControllerResult alterResultOne = replication.alterPartitionReassignments( - new AlterPartitionReassignmentsRequestData().setTopics(asList( - new ReassignableTopic().setName(topic).setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). 
- setReplicas(asList(2, 3))))))); + new AlterPartitionReassignmentsRequestData().setTopics(singletonList( + new ReassignableTopic().setName(topic).setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(asList(2, 3))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). - setErrorMessage(null).setResponses(asList( - new ReassignableTopicResponse().setName(topic).setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), alterResultOne.response()); + setErrorMessage(null).setResponses(singletonList( + new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResultOne.response()); ctx.replay(alterResultOne.records()); ListPartitionReassignmentsResponseData currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName(topic).setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(asList(0, 1)). - setAddingReplicas(asList(2, 3)). - setReplicas(asList(2, 3, 0, 1)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName(topic).setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(asList(0, 1)). + setAddingReplicas(asList(2, 3)). + setReplicas(asList(2, 3, 0, 1)))))); // Make sure the reassignment metadata is as expected. assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2802,25 +2802,25 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd AlterPartitionRequestData alterPartitionRequestData = new AlterPartitionRequestData(). setBrokerId(partition.leader). setBrokerEpoch(ctx.currentBrokerEpoch(partition.leader)). - setTopics(asList(new TopicData(). - setTopicId(topicId). - setPartitions(asList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(partition.partitionEpoch). - setLeaderEpoch(partition.leaderEpoch). - setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); + setTopics(singletonList(new TopicData(). + setTopicId(topicId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(partition.partitionEpoch). + setLeaderEpoch(partition.leaderEpoch). + setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequest.Builder(alterPartitionRequestData, true).build().data()); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData(). - setTopicId(topicId). - setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setIsr(Arrays.asList(0, 1, 2)). - setPartitionEpoch(partition.partitionEpoch + 1). - setErrorCode(NONE.code()))))), + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData(). + setTopicId(topicId). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setIsr(asList(0, 1, 2)). + setPartitionEpoch(partition.partitionEpoch + 1). 
+ setErrorCode(NONE.code()))))), alterPartitionResult.response()); ctx.replay(alterPartitionResult.records()); @@ -2832,9 +2832,9 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd ); ControllerResult electLeaderTwoResult = replication.electLeaders(request); ReplicaElectionResult replicaElectionResult = new ReplicaElectionResult().setTopic(topic); - replicaElectionResult.setPartitionResult(Arrays.asList(new PartitionResult().setPartitionId(0).setErrorCode(NONE.code()).setErrorMessage(null))); + replicaElectionResult.setPartitionResult(singletonList(new PartitionResult().setPartitionId(0).setErrorCode(NONE.code()).setErrorMessage(null))); assertEquals( - new ElectLeadersResponseData().setErrorCode(NONE.code()).setReplicaElectionResults(Arrays.asList(replicaElectionResult)), + new ElectLeadersResponseData().setErrorCode(NONE.code()).setReplicaElectionResults(singletonList(replicaElectionResult)), electLeaderTwoResult.response() ); ctx.replay(electLeaderTwoResult.records()); @@ -2845,26 +2845,26 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd // Reassign to [4, 5] ControllerResult alterResultTwo = replication.alterPartitionReassignments( - new AlterPartitionReassignmentsRequestData().setTopics(asList( - new ReassignableTopic().setName(topic).setPartitions(asList( - new ReassignablePartition().setPartitionIndex(0). - setReplicas(asList(4, 5))))))); + new AlterPartitionReassignmentsRequestData().setTopics(singletonList( + new ReassignableTopic().setName(topic).setPartitions(singletonList( + new ReassignablePartition().setPartitionIndex(0). + setReplicas(asList(4, 5))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). - setErrorMessage(null).setResponses(asList( - new ReassignableTopicResponse().setName(topic).setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), alterResultTwo.response()); + setErrorMessage(null).setResponses(singletonList( + new ReassignableTopicResponse().setName(topic).setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0). + setErrorMessage(null))))), alterResultTwo.response()); ctx.replay(alterResultTwo.records()); // Make sure the replicas list contains all the previous replicas 0, 1, 2, 3 as well as the new replicas 3, 4 currentReassigning = new ListPartitionReassignmentsResponseData().setErrorMessage(null). - setTopics(asList(new OngoingTopicReassignment(). - setName(topic).setPartitions(asList( - new OngoingPartitionReassignment().setPartitionIndex(0). - setRemovingReplicas(asList(0, 1, 2, 3)). - setAddingReplicas(asList(4, 5)). - setReplicas(asList(4, 5, 0, 1, 2, 3)))))); + setTopics(singletonList(new OngoingTopicReassignment(). + setName(topic).setPartitions(singletonList( + new OngoingPartitionReassignment().setPartitionIndex(0). + setRemovingReplicas(asList(0, 1, 2, 3)). + setAddingReplicas(asList(4, 5)). + setReplicas(asList(4, 5, 0, 1, 2, 3)))))); assertEquals(currentReassigning, replication.listPartitionReassignments(null, Long.MAX_VALUE)); @@ -2877,23 +2877,23 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd AlterPartitionRequestData alterPartitionRequestDataTwo = new AlterPartitionRequestData(). setBrokerId(partition.leader). setBrokerEpoch(ctx.currentBrokerEpoch(partition.leader)). - setTopics(asList(new TopicData(). - setTopicId(topicId). - setPartitions(asList(new PartitionData(). - setPartitionIndex(0). - setPartitionEpoch(partition.partitionEpoch). 
- setLeaderEpoch(partition.leaderEpoch). - setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); + setTopics(singletonList(new TopicData(). + setTopicId(topicId). + setPartitions(singletonList(new PartitionData(). + setPartitionIndex(0). + setPartitionEpoch(partition.partitionEpoch). + setLeaderEpoch(partition.leaderEpoch). + setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); ControllerResult alterPartitionResultTwo = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), new AlterPartitionRequest.Builder(alterPartitionRequestDataTwo, true).build().data()); - assertEquals(new AlterPartitionResponseData().setTopics(asList( - new AlterPartitionResponseData.TopicData(). - setTopicId(topicId). - setPartitions(asList( - new AlterPartitionResponseData.PartitionData(). - setPartitionIndex(0). - setErrorCode(NEW_LEADER_ELECTED.code()))))), + assertEquals(new AlterPartitionResponseData().setTopics(singletonList( + new AlterPartitionResponseData.TopicData(). + setTopicId(topicId). + setPartitions(singletonList( + new AlterPartitionResponseData.PartitionData(). + setPartitionIndex(0). + setErrorCode(NEW_LEADER_ELECTED.code()))))), alterPartitionResultTwo.response()); ctx.replay(alterPartitionResultTwo.records()); diff --git a/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java b/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java index 3b9fd83910b4f..bef1d35efc3ed 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ClientQuotasImageTest.java @@ -73,14 +73,14 @@ public class ClientQuotasImageTest { setRemove(true), CLIENT_QUOTA_RECORD.highestSupportedVersion())); // alter quota DELTA1_RECORDS.add(new ApiMessageAndVersion(new ClientQuotaRecord(). - setEntity(Arrays.asList( - new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). + setEntity(Collections.singletonList( + new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). setKey(QuotaConfigs.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG). setValue(234.0), CLIENT_QUOTA_RECORD.highestSupportedVersion())); // add quota to entity with existing quota DELTA1_RECORDS.add(new ApiMessageAndVersion(new ClientQuotaRecord(). - setEntity(Arrays.asList( - new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). + setEntity(Collections.singletonList( + new EntityData().setEntityType(ClientQuotaEntity.USER).setEntityName("foo"))). setKey(QuotaConfigs.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG). setValue(999.0), CLIENT_QUOTA_RECORD.highestSupportedVersion())); diff --git a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java index 820d5a83fa82f..730cfec4963ef 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java @@ -45,7 +45,6 @@ import org.junit.jupiter.api.Timeout; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -83,7 +82,7 @@ public class ClusterImageTest { setId(0). setEpoch(1000). setIncarnationId(Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). 
setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(true). @@ -92,7 +91,7 @@ public class ClusterImageTest { setId(1). setEpoch(1001). setIncarnationId(Uuid.fromString("U52uRe20RsGI0RvpcTx33Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(false). @@ -101,7 +100,7 @@ public class ClusterImageTest { setId(2). setEpoch(123). setIncarnationId(Uuid.fromString("hr4TVh3YQiu3p16Awkka6w")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). setSupportedFeatures(Collections.emptyMap()). setRack(Optional.of("arack")). setFenced(false). @@ -154,7 +153,7 @@ public class ClusterImageTest { setId(0). setEpoch(1000). setIncarnationId(Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(false). @@ -163,7 +162,7 @@ public class ClusterImageTest { setId(1). setEpoch(1001). setIncarnationId(Uuid.fromString("U52uRe20RsGI0RvpcTx33Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(true). @@ -194,8 +193,8 @@ public class ClusterImageTest { DELTA2_RECORDS.add(new ApiMessageAndVersion(new RegisterBrokerRecord(). setBrokerId(2).setIsMigratingZkBroker(true).setIncarnationId(Uuid.fromString("Am5Yse7GQxaw0b2alM74bP")). setBrokerEpoch(1002).setEndPoints(new BrokerEndpointCollection( - Arrays.asList(new BrokerEndpoint().setName("PLAINTEXT").setHost("localhost"). - setPort(9094).setSecurityProtocol((short) 0)).iterator())). + Collections.singletonList(new BrokerEndpoint().setName("PLAINTEXT").setHost("localhost"). + setPort(9094).setSecurityProtocol((short) 0)).iterator())). setFeatures(new BrokerFeatureCollection( Collections.singleton(new BrokerFeature(). setName(MetadataVersion.FEATURE_NAME). @@ -212,7 +211,7 @@ public class ClusterImageTest { setId(0). setEpoch(1000). setIncarnationId(Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9092))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(true). @@ -221,7 +220,7 @@ public class ClusterImageTest { setId(1). setEpoch(1001). setIncarnationId(Uuid.fromString("U52uRe20RsGI0RvpcTx33Q")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). 
+ setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9093))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 3))). setRack(Optional.empty()). setFenced(false). @@ -230,7 +229,7 @@ public class ClusterImageTest { setId(2). setEpoch(1002). setIncarnationId(Uuid.fromString("Am5Yse7GQxaw0b2alM74bP")). - setListeners(Arrays.asList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). + setListeners(Collections.singletonList(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9094))). setSupportedFeatures(Collections.singletonMap("metadata.version", VersionRange.of(MetadataVersion.IBP_3_3_IV3.featureLevel(), MetadataVersion.IBP_3_6_IV0.featureLevel()))). setRack(Optional.of("rack3")). diff --git a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java index 4a29779e0acff..e078ffec8b300 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java @@ -83,8 +83,8 @@ static ApiMessageAndVersion metadataVersionRecord(MetadataVersion metadataVersio @Test public void testPremodernVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_2_IV0, - Arrays.asList( - "feature flag(s): foo.feature"), + Collections.singletonList( + "feature flag(s): foo.feature"), Arrays.asList( metadataVersionRecord(MetadataVersion.IBP_3_3_IV0), TEST_RECORDS.get(0), @@ -103,7 +103,7 @@ public void testPremodernVersion() { @Test public void testPreControlledShutdownStateVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_3_IV2, - Arrays.asList( + Collections.singletonList( "the inControlledShutdown state of one or more brokers"), Arrays.asList( metadataVersionRecord(MetadataVersion.IBP_3_3_IV3), @@ -134,8 +134,8 @@ public void testPreControlledShutdownStateVersion() { @Test public void testPreZkMigrationSupportVersion() { writeWithExpectedLosses(MetadataVersion.IBP_3_3_IV3, - Arrays.asList( - "the isMigratingZkBroker state of one or more brokers"), + Collections.singletonList( + "the isMigratingZkBroker state of one or more brokers"), Arrays.asList( metadataVersionRecord(MetadataVersion.IBP_3_4_IV0), new ApiMessageAndVersion(new RegisterBrokerRecord(). 
diff --git a/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java b/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java index eabb63ff858e2..be85516a19c2e 100644 --- a/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java @@ -247,11 +247,11 @@ public void testBasicLocalChanges() { LocalReplicaChanges changes = delta.localChanges(localId); assertEquals( - new HashSet<>(Arrays.asList(new TopicPartition("baz", 0))), + new HashSet<>(Collections.singletonList(new TopicPartition("baz", 0))), changes.electedLeaders().keySet() ); assertEquals( - new HashSet<>(Arrays.asList(new TopicPartition("baz", 0))), + new HashSet<>(Collections.singletonList(new TopicPartition("baz", 0))), changes.leaders().keySet() ); assertEquals( @@ -303,7 +303,7 @@ public void testDeleteAfterChanges() { RecordTestUtils.replayAll(delta, topicRecords); LocalReplicaChanges changes = delta.localChanges(localId); - assertEquals(new HashSet<>(Arrays.asList(new TopicPartition("zoo", 0))), changes.deletes()); + assertEquals(new HashSet<>(Collections.singletonList(new TopicPartition("zoo", 0))), changes.deletes()); assertEquals(Collections.emptyMap(), changes.electedLeaders()); assertEquals(Collections.emptyMap(), changes.leaders()); assertEquals(Collections.emptyMap(), changes.followers()); @@ -345,7 +345,7 @@ public void testUpdatedLeaders() { assertEquals(Collections.emptySet(), changes.deletes()); assertEquals(Collections.emptyMap(), changes.electedLeaders()); assertEquals( - new HashSet<>(Arrays.asList(new TopicPartition("zoo", 0))), + new HashSet<>(Collections.singletonList(new TopicPartition("zoo", 0))), changes.leaders().keySet() ); assertEquals(Collections.emptyMap(), changes.followers()); diff --git a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java index 921c241a09afb..e7287ab71810a 100644 --- a/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/loader/MetadataLoaderTest.java @@ -249,19 +249,19 @@ public void testPublisherCannotBeInstalledMoreThanOnce( setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0L)). build()) { - loader.installPublishers(asList(publisher)).get(); + loader.installPublishers(Collections.singletonList(publisher)).get(); if (loadSnapshot) { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(200, 100, 4000), - asList( - Batch.control( - 200, - 100, - 4000, - 10, - asList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + Collections.singletonList( + Batch.control( + 200, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + ) ) - ) ); loader.handleLoadSnapshot(snapshotReader); TestUtils.retryOnExceptionWithTimeout(30_000, () -> { @@ -277,13 +277,13 @@ public void testPublisherCannotBeInstalledMoreThanOnce( assertEquals("testPublisherCannotBeInstalledMoreThanOnce: Attempted to install " + "publisher MockPublisher, which is already installed.", assertThrows(ExecutionException.class, - () -> loader.installPublishers(asList(publisher)).get()). + () -> loader.installPublishers(Collections.singletonList(publisher)).get()). 
getCause().getMessage()); } else { assertEquals("testPublisherCannotBeInstalledMoreThanOnce: Attempted to install " + "a new publisher named MockPublisher, but there is already a publisher with that name.", assertThrows(ExecutionException.class, - () -> loader.installPublishers(asList(new MockPublisher())).get()). + () -> loader.installPublishers(Collections.singletonList(new MockPublisher())).get()). getCause().getMessage()); } } @@ -306,10 +306,10 @@ public void testRemovePublisher() throws Exception { loader.removeAndClosePublisher(publishers.get(1)).get(); MockSnapshotReader snapshotReader = MockSnapshotReader.fromRecordLists( new MetadataProvenance(100, 50, 2000), - asList(asList(new ApiMessageAndVersion( - new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)))); + Collections.singletonList(Collections.singletonList(new ApiMessageAndVersion( + new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)))); assertFalse(snapshotReader.closed); loader.handleLoadSnapshot(snapshotReader); loader.waitForAllEventsToBeHandled(); @@ -334,7 +334,7 @@ public void testRemovePublisher() throws Exception { public void testLoadEmptySnapshot() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLoadEmptySnapshot"); MockTime time = new MockTime(); - List publishers = asList(new MockPublisher()); + List publishers = Collections.singletonList(new MockPublisher()); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setTime(time). @@ -364,15 +364,15 @@ private void loadEmptySnapshot( ) throws Exception { MockSnapshotReader snapshotReader = new MockSnapshotReader( new MetadataProvenance(offset, 100, 4000), - asList( - Batch.control( - 200, - 100, - 4000, - 10, - asList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + Collections.singletonList( + Batch.control( + 200, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + ) ) - ) ); if (loader.time() instanceof MockTime) { snapshotReader.setTime((MockTime) loader.time()); @@ -452,7 +452,7 @@ public Batch next() { public void testLoadEmptyBatch() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLoadEmptyBatch"); MockTime time = new MockTime(); - List publishers = asList(new MockPublisher()); + List publishers = Collections.singletonList(new MockPublisher()); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setTime(time). 
@@ -463,15 +463,15 @@ public void testLoadEmptyBatch() throws Exception { publishers.get(0).firstPublish.get(10, TimeUnit.SECONDS); MockBatchReader batchReader = new MockBatchReader( 300, - asList( - Batch.control( - 300, - 100, - 4000, - 10, - asList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + Collections.singletonList( + Batch.control( + 300, + 100, + 4000, + 10, + Collections.singletonList(new ControlRecord(ControlRecordType.SNAPSHOT_HEADER, new SnapshotHeaderRecord())) + ) ) - ) ).setTime(time); loader.handleCommit(batchReader); loader.waitForAllEventsToBeHandled(); @@ -508,22 +508,22 @@ public void testLastAppliedOffset() throws Exception { loader.installPublishers(publishers).get(); loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(200, 100, 4000), asList( - asList(new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), - asList(new ApiMessageAndVersion(new TopicRecord(). - setName("foo"). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) + Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + setName("foo"). + setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); for (MockPublisher publisher : publishers) { publisher.firstPublish.get(1, TimeUnit.MINUTES); } loader.waitForAllEventsToBeHandled(); assertEquals(200L, loader.lastAppliedOffset()); - loader.handleCommit(new MockBatchReader(201, asList( - MockBatchReader.newBatch(201, 100, asList( - new ApiMessageAndVersion(new RemoveTopicRecord(). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)))))); + loader.handleCommit(new MockBatchReader(201, Collections.singletonList( + MockBatchReader.newBatch(201, 100, Collections.singletonList( + new ApiMessageAndVersion(new RemoveTopicRecord(). + setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)))))); loader.waitForAllEventsToBeHandled(); assertEquals(201L, loader.lastAppliedOffset()); } @@ -578,10 +578,10 @@ private void loadTestSnapshot( ) throws Exception { loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(offset, 100, 4000), asList( - asList(new ApiMessageAndVersion(new FeatureLevelRecord(). + Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_3_IV1.featureLevel()), (short) 0)), - asList(new ApiMessageAndVersion(new TopicRecord(). + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("foo"). setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) ))); @@ -594,10 +594,10 @@ private void loadTestSnapshot2( ) throws Exception { loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( new MetadataProvenance(offset, 100, 4000), asList( - asList(new ApiMessageAndVersion(new FeatureLevelRecord(). + Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(IBP_3_3_IV2.featureLevel()), (short) 0)), - asList(new ApiMessageAndVersion(new TopicRecord(). + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). setName("bar"). 
setTopicId(Uuid.fromString("VcL2Mw-cT4aL6XV9VujzoQ")), (short) 0)) ))); @@ -610,7 +610,7 @@ private void loadTestSnapshot2( @Test public void testReloadSnapshot() throws Exception { MockFaultHandler faultHandler = new MockFaultHandler("testLastAppliedOffset"); - List publishers = asList(new MockPublisher("a")); + List publishers = Collections.singletonList(new MockPublisher("a")); try (MetadataLoader loader = new MetadataLoader.Builder(). setFaultHandler(faultHandler). setHighWaterMarkAccessor(() -> OptionalLong.of(0)). @@ -637,11 +637,11 @@ public void testReloadSnapshot() throws Exception { assertFalse(publishers.get(0).latestImage.topics().topicsByName().containsKey("foo")); assertTrue(publishers.get(0).latestImage.topics().topicsByName().containsKey("bar")); - loader.handleCommit(new MockBatchReader(500, asList( - MockBatchReader.newBatch(500, 100, asList( - new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(IBP_3_5_IV0.featureLevel()), (short) 0)))))); + loader.handleCommit(new MockBatchReader(500, Collections.singletonList( + MockBatchReader.newBatch(500, 100, Collections.singletonList( + new ApiMessageAndVersion(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(IBP_3_5_IV0.featureLevel()), (short) 0)))))); loader.waitForAllEventsToBeHandled(); assertEquals(IBP_3_5_IV0.featureLevel(), loader.metrics().currentMetadataVersion().featureLevel()); @@ -690,8 +690,8 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { if (abortTxn) { loader.handleCommit( - MockBatchReader.newSingleBatchReader(500, 100, Arrays.asList( - new ApiMessageAndVersion(new AbortTransactionRecord(), (short) 0) + MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( + new ApiMessageAndVersion(new AbortTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -699,8 +699,8 @@ public void testPublishTransaction(boolean abortTxn) throws Exception { "Topic should not be visible since the transaction was aborted"); } else { loader.handleCommit( - MockBatchReader.newSingleBatchReader(500, 100, Arrays.asList( - new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0) + MockBatchReader.newSingleBatchReader(500, 100, Collections.singletonList( + new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0) ))); loader.waitForAllEventsToBeHandled(); @@ -768,11 +768,11 @@ public void testSnapshotDuringTransaction() throws Exception { // loading a snapshot discards any in-flight transaction loader.handleLoadSnapshot(MockSnapshotReader.fromRecordLists( - new MetadataProvenance(600, 101, 4000), asList( - asList(new ApiMessageAndVersion(new TopicRecord(). - setName("foo"). - setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) - ))); + new MetadataProvenance(600, 101, 4000), Collections.singletonList( + Collections.singletonList(new ApiMessageAndVersion(new TopicRecord(). + setName("foo"). 
+ setTopicId(Uuid.fromString("Uum7sfhHQP-obSvfywmNUA")), (short) 0)) + ))); loader.waitForAllEventsToBeHandled(); assertEquals("Uum7sfhHQP-obSvfywmNUA", publisher.latestImage.topics().getTopic("foo").id().toString()); diff --git a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java index 23d943a160318..f8e9021fd25dd 100644 --- a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java @@ -53,7 +53,7 @@ public class ClusterImageBrokersNodeTest { @Test public void testChildNames() { - assertEquals(Arrays.asList("1"), NODE.childNames()); + assertEquals(Collections.singletonList("1"), NODE.childNames()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java index 3d347ec3178f4..4540e83502512 100644 --- a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java @@ -25,7 +25,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.util.Arrays; import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -50,7 +49,7 @@ public class ClusterImageControllersNodeTest { @Test public void testChildNames() { - assertEquals(Arrays.asList("2"), NODE.childNames()); + assertEquals(Collections.singletonList("2"), NODE.childNames()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java index ebbe52d24e014..68cc77bcf6e5f 100644 --- a/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotGeneratorTest.java @@ -31,7 +31,6 @@ import org.junit.jupiter.api.Timeout; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -118,7 +117,7 @@ public void testCreateSnapshot() throws Exception { assertEquals(Collections.emptyList(), emitter.images()); emitter.setReady(); } - assertEquals(Arrays.asList(TEST_IMAGE), emitter.images()); + assertEquals(Collections.singletonList(TEST_IMAGE), emitter.images()); faultHandler.maybeRethrowFirstException(); } @@ -163,7 +162,7 @@ public void testTimeBasedSnapshots() throws Exception { // so this does not trigger a new snapshot. 
generator.publishLogDelta(TEST_DELTA, TEST_IMAGE, logDeltaManifestBuilder().numBytes(150).build()); } - assertEquals(Arrays.asList(TEST_IMAGE), emitter.images()); + assertEquals(Collections.singletonList(TEST_IMAGE), emitter.images()); faultHandler.maybeRethrowFirstException(); } diff --git a/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java b/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java index 0137aeb077224..a7f7578fe590b 100644 --- a/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/writer/RaftSnapshotWriterTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import static java.util.Collections.emptyList; import static org.apache.kafka.metadata.RecordTestUtils.testRecord; @@ -44,7 +45,7 @@ public void testFreezeAndClose() { assertTrue(snapshotWriter.isClosed()); assertEquals(Arrays.asList( Arrays.asList(testRecord(0), testRecord(1)), - Arrays.asList(testRecord(2))), snapshotWriter.batches()); + Collections.singletonList(testRecord(2))), snapshotWriter.batches()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java b/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java index 35551e7c6570d..8f3eac706f04a 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java @@ -48,7 +48,7 @@ public class BrokerRegistrationTest { setId(0). setEpoch(0). setIncarnationId(Uuid.fromString("pc1GhUlBS92cGGaKXl6ipw")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(false). @@ -57,7 +57,7 @@ public class BrokerRegistrationTest { setId(1). setEpoch(0). setIncarnationId(Uuid.fromString("3MfdxWlNSn2UDYsmDP1pYg")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9091))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9091))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(true). @@ -66,7 +66,7 @@ public class BrokerRegistrationTest { setId(2). setEpoch(0). setIncarnationId(Uuid.fromString("eY7oaG1RREie5Kk9uy1l6g")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9092))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9092))). setSupportedFeatures(Stream.of(new SimpleEntry<>("foo", VersionRange.of((short) 2, (short) 3)), new SimpleEntry<>("bar", VersionRange.of((short) 1, (short) 4))).collect( Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue))). @@ -77,14 +77,14 @@ public class BrokerRegistrationTest { setId(3). setEpoch(0). setIncarnationId(Uuid.fromString("1t8VyWx2TCSTpUWuqj-FOw")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093))). 
setSupportedFeatures(Stream.of(new SimpleEntry<>("metadata.version", VersionRange.of((short) 7, (short) 7))) .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue))). setRack(Optional.empty()). setFenced(false). setInControlledShutdown(true). setIsMigratingZkBroker(true). - setDirectories(Arrays.asList(Uuid.fromString("r4HpEsMuST6nQ4rznIEJVA"))). + setDirectories(Collections.singletonList(Uuid.fromString("r4HpEsMuST6nQ4rznIEJVA"))). build()); @Test @@ -172,7 +172,7 @@ public void testDirectoriesAreSorted() { setId(0). setEpoch(0). setIncarnationId(Uuid.fromString("ik32HZbLTW6ulw1yyrC8jQ")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(false). @@ -202,7 +202,7 @@ void testHasOnlineDir() { setId(0). setEpoch(0). setIncarnationId(Uuid.fromString("m6CiJvfITZeKVC6UuhlZew")). - setListeners(Arrays.asList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). + setListeners(Collections.singletonList(new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9090))). setSupportedFeatures(Collections.singletonMap("foo", VersionRange.of((short) 1, (short) 2))). setRack(Optional.empty()). setFenced(false). diff --git a/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java b/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java index cc0f74b9c581c..2c84d8d6031f1 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/DelegationTokenDataTest.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -39,7 +40,7 @@ public class DelegationTokenDataTest { Uuid.randomUuid().toString(), Uuid.randomUuid().toString()); - private static final List EMPTYRENEWERS = Arrays.asList(); + private static final List EMPTYRENEWERS = Collections.emptyList(); private static final List TOKENINFORMATION = Arrays.asList( new TokenInformation( diff --git a/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java b/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java index 49597fff9ae7b..2574747f0a1ed 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/KafkaConfigSchemaTest.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -61,9 +62,9 @@ public class KafkaConfigSchemaTest { public static final Map> SYNONYMS = new HashMap<>(); static { - SYNONYMS.put("abc", Arrays.asList(new ConfigSynonym("foo.bar"))); - SYNONYMS.put("def", Arrays.asList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); - SYNONYMS.put("ghi", Arrays.asList(new ConfigSynonym("ghi"))); + SYNONYMS.put("abc", Collections.singletonList(new ConfigSynonym("foo.bar"))); + SYNONYMS.put("def", Collections.singletonList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); + SYNONYMS.put("ghi", Collections.singletonList(new ConfigSynonym("ghi"))); SYNONYMS.put("xyz", Arrays.asList(new 
ConfigSynonym("quuux"), new ConfigSynonym("quuux2"))); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java b/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java index f4f2a843c250a..ac90599d7d9d0 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/ListenerInfoTest.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -65,23 +66,23 @@ public class ListenerInfoTest { @Test public void testNullHostname() { - assertNull(ListenerInfo.create(Arrays.asList(INTERNAL)).firstListener().host()); + assertNull(ListenerInfo.create(Collections.singletonList(INTERNAL)).firstListener().host()); } @Test public void testNullHostnameGetsResolved() throws Exception { - assertNotNull(ListenerInfo.create(Arrays.asList(INTERNAL)). + assertNotNull(ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved().firstListener().host()); } @Test public void testEmptyHostname() { - assertEquals("", ListenerInfo.create(Arrays.asList(SSL)).firstListener().host()); + assertEquals("", ListenerInfo.create(Collections.singletonList(SSL)).firstListener().host()); } @Test public void testEmptyHostnameGetsResolved() throws Exception { - assertNotEquals("", ListenerInfo.create(Arrays.asList(SSL)). + assertNotEquals("", ListenerInfo.create(Collections.singletonList(SSL)). withWildcardHostnamesResolved().firstListener().host()); } @@ -118,14 +119,14 @@ public void testRoundTripToControllerRegistrationRequest() throws Exception { @Test public void testToControllerRegistrationRequestFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toControllerRegistrationRequest()); } @Test public void testToControllerRegistrationRequestFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved(). toControllerRegistrationRequest()); } @@ -143,14 +144,14 @@ public void testRoundTripToControllerRegistrationRecord() throws Exception { @Test public void testToControllerRegistrationRecordFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toControllerRegistrationRecord()); } @Test public void testToControllerRegistrationRecordFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved(). toControllerRegistrationRecord()); } @@ -168,14 +169,14 @@ public void testRoundTripToBrokerRegistrationRequest() throws Exception { @Test public void testToBrokerRegistrationRequestFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toBrokerRegistrationRequest()); } @Test public void testToBrokerRegistrationRequestFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). 
withWildcardHostnamesResolved(). toBrokerRegistrationRequest()); } @@ -193,14 +194,14 @@ public void testRoundTripToBrokerRegistrationRecord() throws Exception { @Test public void testToBrokerRegistrationRecordFailsOnNullHost() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). toBrokerRegistrationRecord()); } @Test public void testToBrokerRegistrationRecordFailsOnZeroPort() { assertThrows(RuntimeException.class, - () -> ListenerInfo.create(Arrays.asList(INTERNAL)). + () -> ListenerInfo.create(Collections.singletonList(INTERNAL)). withWildcardHostnamesResolved(). toBrokerRegistrationRecord()); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java b/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java index 8816f2f141de9..9cf47faa23aa2 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/PartitionRegistrationTest.java @@ -74,7 +74,7 @@ public void testPartitionControlInfoMergeAndDiff() { setReplicas(new int[]{1, 2, 3}).setDirectories(DirectoryId.unassignedArray(3)). setIsr(new int[]{1}).setLastKnownElr(new int[]{3}).setElr(new int[]{2}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(1).build(); assertEquals(b, a.merge(new PartitionChangeRecord(). - setLeader(3).setIsr(Arrays.asList(3)))); + setLeader(3).setIsr(Collections.singletonList(3)))); assertEquals("isr: [1, 2] -> [3], leader: 1 -> 3, leaderEpoch: 0 -> 1, partitionEpoch: 0 -> 1", b.diff(a)); assertEquals("isr: [1, 2] -> [1], elr: [] -> [2], lastKnownElr: [] -> [3], partitionEpoch: 0 -> 1", @@ -320,7 +320,7 @@ public void testPartitionRegistrationToRecord(MetadataVersion metadataVersion) { if (metadataVersion.isElrSupported()) { expectRecord. setEligibleLeaderReplicas(Arrays.asList(2, 3)). - setLastKnownElr(Arrays.asList(4)); + setLastKnownElr(Collections.singletonList(4)); } if (metadataVersion.isDirectoryAssignmentSupported()) { expectRecord.setDirectories(Arrays.asList( diff --git a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java index b35f807564522..608665e751110 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java @@ -373,12 +373,12 @@ public static RegisterControllerRecord createTestControllerRegistration( ).iterator() )). setFeatures(new RegisterControllerRecord.ControllerFeatureCollection( - Arrays.asList( - new RegisterControllerRecord.ControllerFeature(). - setName(MetadataVersion.FEATURE_NAME). - setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). - setMaxSupportedVersion(MetadataVersion.IBP_3_6_IV1.featureLevel()) - ).iterator() + Collections.singletonList( + new RegisterControllerRecord.ControllerFeature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). 
+ setMaxSupportedVersion(MetadataVersion.IBP_3_6_IV1.featureLevel()) + ).iterator() )); } } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java b/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java index 7a26d48f63b3b..365c5eb1690bc 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java @@ -35,15 +35,15 @@ public class ReplicasTest { @Test public void testToList() { assertEquals(Arrays.asList(1, 2, 3, 4), Replicas.toList(new int[] {1, 2, 3, 4})); - assertEquals(Arrays.asList(), Replicas.toList(Replicas.NONE)); - assertEquals(Arrays.asList(2), Replicas.toList(new int[] {2})); + assertEquals(Collections.emptyList(), Replicas.toList(Replicas.NONE)); + assertEquals(Collections.singletonList(2), Replicas.toList(new int[] {2})); } @Test public void testToArray() { assertArrayEquals(new int[] {3, 2, 1}, Replicas.toArray(Arrays.asList(3, 2, 1))); - assertArrayEquals(new int[] {}, Replicas.toArray(Arrays.asList())); - assertArrayEquals(new int[] {2}, Replicas.toArray(Arrays.asList(2))); + assertArrayEquals(new int[] {}, Replicas.toArray(Collections.emptyList())); + assertArrayEquals(new int[] {2}, Replicas.toArray(Collections.singletonList(2))); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java index de21438c8c848..51ab544b18913 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAclRecordIteratorTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import java.util.NoSuchElementException; import static org.apache.kafka.metadata.authorizer.StandardAclWithIdTest.TEST_ACLS; @@ -47,8 +48,8 @@ public void testIteration() { new ApiMessageAndVersion(TEST_ACLS.get(3).toRecord(), (short) 0)), iterator.next()); assertTrue(iterator.hasNext()); - assertEquals(Arrays.asList( - new ApiMessageAndVersion(TEST_ACLS.get(4).toRecord(), (short) 0)), + assertEquals(Collections.singletonList( + new ApiMessageAndVersion(TEST_ACLS.get(4).toRecord(), (short) 0)), iterator.next()); assertFalse(iterator.hasNext()); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java index f22351f10fbaf..731a944639bf0 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java @@ -76,10 +76,10 @@ public void testCopyWithOnlyVersion() { BootstrapMetadata.fromRecords(SAMPLE_RECORDS1, "baz").copyWithOnlyVersion()); } - final static List<ApiMessageAndVersion> RECORDS_WITH_OLD_METADATA_VERSION = unmodifiableList(asList( + final static List<ApiMessageAndVersion> RECORDS_WITH_OLD_METADATA_VERSION = unmodifiableList(Collections.singletonList( new ApiMessageAndVersion(new FeatureLevelRecord(). - setName(FEATURE_NAME). 
+ setFeatureLevel(IBP_3_0_IV1.featureLevel()), (short) 0))); @Test public void testFromRecordsListWithOldMetadataVersion() { diff --git a/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java b/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java index 14f347ca1e1ea..4f0b3c37c974b 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java @@ -471,9 +471,9 @@ public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate( startAndWaitForRecoveringMigrationStateFromZK(driver); if (allNodePresent) { - setupDeltaWithControllerRegistrations(delta, Arrays.asList(4, 5, 6), Arrays.asList()); + setupDeltaWithControllerRegistrations(delta, Arrays.asList(4, 5, 6), Collections.emptyList()); } else { - setupDeltaWithControllerRegistrations(delta, Arrays.asList(), Arrays.asList(4, 5)); + setupDeltaWithControllerRegistrations(delta, Collections.emptyList(), Arrays.asList(4, 5)); } delta.replay(zkBrokerRecord(1)); MetadataProvenance provenance = new MetadataProvenance(100, 1, 1); @@ -493,7 +493,7 @@ public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate( // Update so that all controller nodes are zkMigrationReady. Now we should be able to move to the next state. delta = new MetadataDelta(image); - setupDeltaWithControllerRegistrations(delta, Arrays.asList(), Arrays.asList(4, 5, 6)); + setupDeltaWithControllerRegistrations(delta, Collections.emptyList(), Arrays.asList(4, 5, 6)); image = delta.apply(new MetadataProvenance(200, 1, 2)); driver.onMetadataUpdate(delta, image, new LogDeltaManifest.Builder(). provenance(image.provenance()). 
diff --git a/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java b/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java index 924fcdb7559aa..d0b4ad956ea1a 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/placement/StripedReplicaPlacerTest.java @@ -119,9 +119,9 @@ public Uuid defaultDir(int brokerId) { public void testMultiPartitionTopicPlacementOnSingleUnfencedBroker() { MockRandom random = new MockRandom(); StripedReplicaPlacer placer = new StripedReplicaPlacer(random); - assertEquals(new TopicAssignment(Arrays.asList(partitionAssignment(Arrays.asList(0)), - partitionAssignment(Arrays.asList(0)), - partitionAssignment(Arrays.asList(0)))), + assertEquals(new TopicAssignment(Arrays.asList(partitionAssignment(Collections.singletonList(0)), + partitionAssignment(Collections.singletonList(0)), + partitionAssignment(Collections.singletonList(0)))), place(placer, 0, 3, (short) 1, Arrays.asList( new UsableBroker(0, Optional.empty(), false), new UsableBroker(1, Optional.empty(), true)))); diff --git a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java index 26f8841d834f5..289739b53292f 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java @@ -25,6 +25,7 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; +import java.util.Collections; import java.util.List; public class TopicAssignmentTest { @@ -44,18 +45,18 @@ public void testTopicAssignmentReplicas() { public void testConsistentEqualsAndHashCode() { List<TopicAssignment> topicAssignments = Arrays.asList( new TopicAssignment( - Arrays.asList( - partitionAssignment( - Arrays.asList(0, 1, 2) + Collections.singletonList( + partitionAssignment( + Arrays.asList(0, 1, 2) + ) ) - ) ), new TopicAssignment( - Arrays.asList( - partitionAssignment( - Arrays.asList(1, 2, 0) + Collections.singletonList( + partitionAssignment( + Arrays.asList(1, 2, 0) + ) ) - ) ) ); @@ -81,8 +82,8 @@ public void testToString() { Uuid.fromString("MvUIAsOiRlSePeiBHdZrSQ"), Uuid.fromString("jUqCchHtTHqMxeVv4dw1RA") ); - List<PartitionAssignment> partitionAssignments = Arrays.asList( - new PartitionAssignment(replicas, directories::get) + List<PartitionAssignment> partitionAssignments = Collections.singletonList( + new PartitionAssignment(replicas, directories::get) ); TopicAssignment topicAssignment = new TopicAssignment(partitionAssignments); assertEquals("TopicAssignment(assignments=[PartitionAssignment(replicas=[0, 1, 2], " + diff --git a/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java b/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java index 6266df68065a0..38f24707b3b0c 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java @@ -54,7 +54,7 @@ final public class MetaPropertiesEnsembleTest { private static final MetaPropertiesEnsemble FOO = new MetaPropertiesEnsemble( new HashSet<>(Arrays.asList("/tmp/empty1", "/tmp/empty2")), - new HashSet<>(Arrays.asList("/tmp/error3")), + new HashSet<>(Collections.singletonList("/tmp/error3")), Stream.of( new 
SimpleImmutableEntry<>("/tmp/dir4", new MetaProperties.Builder(). @@ -104,7 +104,7 @@ public void testEmptyLogDirsForEmpty() { @Test public void testErrorLogDirsForFoo() { - assertEquals(new HashSet<>(Arrays.asList("/tmp/error3")), FOO.errorLogDirs()); + assertEquals(new HashSet<>(Collections.singletonList("/tmp/error3")), FOO.errorLogDirs()); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java b/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java index 3b82d5765c976..f7336b90c5703 100644 --- a/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java +++ b/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java @@ -99,8 +99,7 @@ public int size() { public boolean equals(Object o) { if (!(o instanceof LeaderChangeBatch)) return false; LeaderChangeBatch other = (LeaderChangeBatch) o; - if (!other.newLeader.equals(newLeader)) return false; - return true; + return other.newLeader.equals(newLeader); } @Override diff --git a/raft/src/main/java/org/apache/kafka/raft/ElectionState.java b/raft/src/main/java/org/apache/kafka/raft/ElectionState.java index 005ff23a4f927..825acf7df69fd 100644 --- a/raft/src/main/java/org/apache/kafka/raft/ElectionState.java +++ b/raft/src/main/java/org/apache/kafka/raft/ElectionState.java @@ -30,9 +30,9 @@ * Encapsulate election state stored on disk after every state change. */ final public class ElectionState { - private static int unknownLeaderId = -1; - private static int notVoted = -1; - private static Uuid noVotedDirectoryId = Uuid.ZERO_UUID; + private static final int UNKNOWN_LEADER_ID = -1; + private static final int NOT_VOTED = -1; + private static final Uuid NO_VOTED_DIRECTORY_ID = Uuid.ZERO_UUID; private final int epoch; private final OptionalInt leaderId; @@ -95,7 +95,7 @@ public int leaderId() { } public int leaderIdOrSentinel() { - return leaderId.orElse(unknownLeaderId); + return leaderId.orElse(UNKNOWN_LEADER_ID); } public OptionalInt optionalLeaderId() { @@ -126,7 +126,7 @@ public QuorumStateData toQuorumStateData(short version) { QuorumStateData data = new QuorumStateData() .setLeaderEpoch(epoch) .setLeaderId(leaderIdOrSentinel()) - .setVotedId(votedKey.map(ReplicaKey::id).orElse(notVoted)); + .setVotedId(votedKey.map(ReplicaKey::id).orElse(NOT_VOTED)); if (version == 0) { List dataVoters = voters @@ -135,7 +135,7 @@ public QuorumStateData toQuorumStateData(short version) { .collect(Collectors.toList()); data.setCurrentVoters(dataVoters); } else if (version == 1) { - data.setVotedDirectoryId(votedKey.flatMap(ReplicaKey::directoryId).orElse(noVotedDirectoryId)); + data.setVotedDirectoryId(votedKey.flatMap(ReplicaKey::directoryId).orElse(NO_VOTED_DIRECTORY_ID)); } else { throw new IllegalStateException( String.format( @@ -198,17 +198,17 @@ public static ElectionState withUnknownLeader(int epoch, Set voters) { } public static ElectionState fromQuorumStateData(QuorumStateData data) { - Optional votedDirectoryId = data.votedDirectoryId().equals(noVotedDirectoryId) ? + Optional votedDirectoryId = data.votedDirectoryId().equals(NO_VOTED_DIRECTORY_ID) ? Optional.empty() : Optional.of(data.votedDirectoryId()); - Optional votedKey = data.votedId() == notVoted ? + Optional votedKey = data.votedId() == NOT_VOTED ? Optional.empty() : Optional.of(ReplicaKey.of(data.votedId(), votedDirectoryId)); return new ElectionState( data.leaderEpoch(), - data.leaderId() == unknownLeaderId ? OptionalInt.empty() : OptionalInt.of(data.leaderId()), + data.leaderId() == UNKNOWN_LEADER_ID ? 
OptionalInt.empty() : OptionalInt.of(data.leaderId()), votedKey, data.currentVoters().stream().map(QuorumStateData.Voter::voterId).collect(Collectors.toSet()) ); diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/LogHistory.java b/raft/src/main/java/org/apache/kafka/raft/internals/LogHistory.java index 6751400678e14..a3a37daf80c71 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/LogHistory.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/LogHistory.java @@ -76,7 +76,7 @@ public interface LogHistory<T> { */ void clear(); - final static class Entry<T> { + final class Entry<T> { private final long offset; private final T value; @@ -101,9 +101,7 @@ public boolean equals(Object o) { Entry that = (Entry) o; if (offset != that.offset) return false; - if (!Objects.equals(value, that.value)) return false; - - return true; + return Objects.equals(value, that.value); } @Override diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/ReplicaKey.java b/raft/src/main/java/org/apache/kafka/raft/internals/ReplicaKey.java index 7d799a9bd6d40..002a2dee1914a 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/ReplicaKey.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/ReplicaKey.java @@ -45,9 +45,7 @@ public boolean equals(Object o) { ReplicaKey that = (ReplicaKey) o; if (id != that.id) return false; - if (!Objects.equals(directoryId, that.directoryId)) return false; - - return true; + return Objects.equals(directoryId, that.directoryId); } @Override diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/VoterSet.java b/raft/src/main/java/org/apache/kafka/raft/internals/VoterSet.java index d5a046e8f8916..229ed103b0dfb 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/VoterSet.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/VoterSet.java @@ -218,9 +218,7 @@ public boolean hasOverlappingMajority(VoterSet that) { .collect(Collectors.toSet()); if (Utils.diff(HashSet::new, thisReplicaKeys, thatReplicaKeys).size() > 1) return false; - if (Utils.diff(HashSet::new, thatReplicaKeys, thisReplicaKeys).size() > 1) return false; - - return true; + return Utils.diff(HashSet::new, thatReplicaKeys, thisReplicaKeys).size() <= 1; } @Override @@ -284,9 +282,7 @@ public boolean equals(Object o) { if (!Objects.equals(voterKey, that.voterKey)) return false; if (!Objects.equals(supportedKRaftVersion, that.supportedKRaftVersion)) return false; - if (!Objects.equals(listeners, that.listeners)) return false; - - return true; + return Objects.equals(listeners, that.listeners); } @Override diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java index 3fcbec4229e70..cfe1ea01d4372 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java @@ -41,6 +41,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.OptionalLong; @@ -122,7 +123,7 @@ public void testLeaderListenerNotified(boolean entireLog) throws Exception { // Check that listener was notified of the new snapshot try (SnapshotReader<String> snapshot = context.listener.drainHandledSnapshot().get()) { assertEquals(snapshotId, snapshot.snapshotId()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(), snapshot); + 
SnapshotWriterReaderTest.assertSnapshot(Collections.emptyList(), snapshot); } } @@ -164,7 +165,7 @@ public void testFollowerListenerNotified(boolean entireLog) throws Exception { // Check that listener was notified of the new snapshot try (SnapshotReader<String> snapshot = context.listener.drainHandledSnapshot().get()) { assertEquals(snapshotId, snapshot.snapshotId()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(), snapshot); + SnapshotWriterReaderTest.assertSnapshot(Collections.emptyList(), snapshot); } } @@ -210,7 +211,7 @@ public void testSecondListenerNotified(boolean entireLog) throws Exception { // Check that the second listener was notified of the new snapshot try (SnapshotReader<String> snapshot = secondListener.drainHandledSnapshot().get()) { assertEquals(snapshotId, snapshot.snapshotId()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(), snapshot); + SnapshotWriterReaderTest.assertSnapshot(Collections.emptyList(), snapshot); } } @@ -245,7 +246,7 @@ public void testListenerRenotified() throws Exception { // Check that listener was notified of the new snapshot try (SnapshotReader<String> snapshot = context.listener.drainHandledSnapshot().get()) { assertEquals(snapshotId, snapshot.snapshotId()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(), snapshot); + SnapshotWriterReaderTest.assertSnapshot(Collections.emptyList(), snapshot); } // Generate a new snapshot @@ -264,7 +265,7 @@ public void testListenerRenotified() throws Exception { // Check that listener was notified of the second snapshot try (SnapshotReader<String> snapshot = context.listener.drainHandledSnapshot().get()) { assertEquals(secondSnapshotId, snapshot.snapshotId()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(), snapshot); + SnapshotWriterReaderTest.assertSnapshot(Collections.emptyList(), snapshot); } } @@ -660,7 +661,7 @@ public void testFetchSnapshotRequestAsLeader() throws Exception { List<String> records = Arrays.asList("foo", "bar"); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) - .appendToLog(snapshotId.epoch(), Arrays.asList("a")) + .appendToLog(snapshotId.epoch(), Collections.singletonList("a")) .build(); context.becomeLeader(); @@ -712,7 +713,7 @@ public void testLeaderShouldResignLeadershipIfNotGetFetchSnapshotRequestFromMajo List<String> records = Arrays.asList("foo", "bar"); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) - .appendToLog(snapshotId.epoch(), Arrays.asList("a")) + .appendToLog(snapshotId.epoch(), Collections.singletonList("a")) .build(); int resignLeadershipTimeout = context.checkQuorumTimeoutMs; @@ -909,7 +910,7 @@ public void testFetchSnapshotRequestWithInvalidPosition() throws Exception { List<String> records = Arrays.asList("foo", "bar"); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) - .appendToLog(snapshotId.epoch(), Arrays.asList("a")) + .appendToLog(snapshotId.epoch(), Collections.singletonList("a")) .build(); context.becomeLeader(); @@ -1136,12 +1137,12 @@ public void testFetchResponseWithSnapshotId() throws Exception { // Check that the snapshot was written to the log RawSnapshotReader snapshot = context.log.readSnapshot(snapshotId).get(); assertEquals(memorySnapshot.buffer().remaining(), snapshot.sizeInBytes()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(records), snapshot); + SnapshotWriterReaderTest.assertSnapshot(Collections.singletonList(records), snapshot); // Check that listener was notified of the new snapshot try (SnapshotReader<String> reader = 
context.listener.drainHandledSnapshot().get()) { assertEquals(snapshotId, reader.snapshotId()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(records), reader); + SnapshotWriterReaderTest.assertSnapshot(Collections.singletonList(records), reader); } } @@ -1239,12 +1240,12 @@ public void testFetchSnapshotResponsePartialData() throws Exception { // Check that the snapshot was written to the log RawSnapshotReader snapshot = context.log.readSnapshot(snapshotId).get(); assertEquals(memorySnapshot.buffer().remaining(), snapshot.sizeInBytes()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(records), snapshot); + SnapshotWriterReaderTest.assertSnapshot(Collections.singletonList(records), snapshot); // Check that listener was notified of the new snapshot try (SnapshotReader<String> reader = context.listener.drainHandledSnapshot().get()) { assertEquals(snapshotId, reader.snapshotId()); - SnapshotWriterReaderTest.assertSnapshot(Arrays.asList(records), reader); + SnapshotWriterReaderTest.assertSnapshot(Collections.singletonList(records), reader); } } diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java index c531e5860ac80..135523fdda662 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java @@ -456,7 +456,7 @@ public void testResignInOlderEpochIgnored() throws Exception { context.client.poll(); // Ensure we are still leader even after expiration of the election timeout. - context.time.sleep(context.electionTimeoutMs() * 2); + context.time.sleep(context.electionTimeoutMs() * 2L); context.client.poll(); context.assertElectedLeader(currentEpoch, localId); } @@ -598,7 +598,7 @@ public void testElectionTimeoutAfterUserInitiatedResign() throws Exception { resignedEpoch, OptionalInt.of(localId)); // After the election timer, we should become a candidate. 
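// [Editorial aside, not part of the patch] The sleep() changes in the surrounding
// hunks replace `context.electionTimeoutMs() * 2` with `... * 2L`. With two int
// operands the product is computed in 32-bit arithmetic and can wrap before it is
// widened to the long parameter of sleep(); promoting one operand to long first
// makes the whole multiplication 64-bit. A minimal, self-contained sketch with a
// hypothetical timeout value:

    public class IntOverflowSketch {
        public static void main(String[] args) {
            int timeoutMs = 1_500_000_000;       // hypothetical value near Integer.MAX_VALUE
            long wrapped = timeoutMs * 2;        // int math wraps to -1294967296, then widens
            long correct = timeoutMs * 2L;       // long math yields 3000000000
            System.out.println(wrapped + " vs " + correct);
        }
    }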
- context.time.sleep(2 * context.electionTimeoutMs()); + context.time.sleep(2L * context.electionTimeoutMs()); context.pollUntil(context.client.quorum()::isCandidate); assertEquals(resignedEpoch + 1, context.currentEpoch()); assertEquals(new LeaderAndEpoch(OptionalInt.empty(), resignedEpoch + 1), @@ -681,7 +681,7 @@ public void testInitializeAsCandidateAndBecomeLeader() throws Exception { RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build(); context.assertUnknownLeader(0); - context.time.sleep(2 * context.electionTimeoutMs()); + context.time.sleep(2L * context.electionTimeoutMs()); context.pollUntilRequest(); context.assertVotedCandidate(1, localId); @@ -721,7 +721,7 @@ public void testInitializeAsCandidateAndBecomeLeaderQuorumOfThree() throws Excep RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build(); context.assertUnknownLeader(0); - context.time.sleep(2 * context.electionTimeoutMs()); + context.time.sleep(2L * context.electionTimeoutMs()); context.pollUntilRequest(); context.assertVotedCandidate(1, localId); @@ -1098,7 +1098,7 @@ public void testVoteRequestTimeout() throws Exception { RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build(); context.assertUnknownLeader(0); - context.time.sleep(2 * context.electionTimeoutMs()); + context.time.sleep(2L * context.electionTimeoutMs()); context.pollUntilRequest(); context.assertVotedCandidate(epoch, localId); @@ -1333,7 +1333,7 @@ public void testRetryElection() throws Exception { context.assertUnknownLeader(0); - context.time.sleep(2 * context.electionTimeoutMs()); + context.time.sleep(2L * context.electionTimeoutMs()); context.pollUntilRequest(); context.assertVotedCandidate(epoch, localId); @@ -1894,7 +1894,7 @@ public void testVoteResponseIgnoredAfterBecomingFollower() throws Exception { context.assertUnknownLeader(epoch - 1); // Sleep a little to ensure that we become a candidate - context.time.sleep(context.electionTimeoutMs() * 2); + context.time.sleep(context.electionTimeoutMs() * 2L); // Wait until the vote requests are inflight context.pollUntilRequest(); @@ -2446,7 +2446,7 @@ public void testFollowerLogReconciliation() throws Exception { RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) .withElectedLeader(epoch, otherNodeId) .appendToLog(lastEpoch, Arrays.asList("foo", "bar")) - .appendToLog(lastEpoch, Arrays.asList("baz")) + .appendToLog(lastEpoch, singletonList("baz")) .build(); context.assertElectedLeader(epoch, otherNodeId); @@ -2573,7 +2573,7 @@ public void testClusterAuthorizationFailedInVote() throws Exception { .build(); // Sleep a little to ensure that we become a candidate - context.time.sleep(context.electionTimeoutMs() * 2); + context.time.sleep(context.electionTimeoutMs() * 2L); context.pollUntilRequest(); context.assertVotedCandidate(epoch, localId); @@ -2926,7 +2926,7 @@ public void testHandleCommitCallbackFiresInCandidateState() throws Exception { // Timeout the election and become candidate int candidateEpoch = epoch + 2; - context.time.sleep(context.electionTimeoutMs() * 2); + context.time.sleep(context.electionTimeoutMs() * 2L); context.client.poll(); context.assertVotedCandidate(candidateEpoch, localId); diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java index 5c2ab10957df3..75837c38782a6 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java +++ 
b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java @@ -391,7 +391,7 @@ static RaftClientTestContext initializeAsLeader(int localId, Set<Integer> voters public void becomeLeader() throws Exception { int currentEpoch = currentEpoch(); - time.sleep(electionTimeoutMs * 2); + time.sleep(electionTimeoutMs * 2L); expectAndGrantVotes(currentEpoch + 1); expectBeginEpoch(currentEpoch + 1); } diff --git a/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java b/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java index 7fedd5175de51..438c523dc27b1 100644 --- a/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java @@ -27,6 +27,7 @@ import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -68,7 +69,7 @@ void testBuildBatch(CompressionType compressionType) { records.forEach(record -> builder.appendRecord(record, null)); MemoryRecords builtRecordSet = builder.build(); - assertTrue(builder.bytesNeeded(Arrays.asList("a"), null).isPresent()); + assertTrue(builder.bytesNeeded(Collections.singletonList("a"), null).isPresent()); assertThrows(IllegalStateException.class, () -> builder.appendRecord("a", null)); List<MutableRecordBatch> builtBatches = Utils.toList(builtRecordSet.batchIterator()); @@ -112,7 +113,7 @@ public void testHasRoomForUncompressed(int batchSize) { String record = "i am a record"; - while (!builder.bytesNeeded(Arrays.asList(record), null).isPresent()) { + while (!builder.bytesNeeded(Collections.singletonList(record), null).isPresent()) { builder.appendRecord(record, null); } diff --git a/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetTest.java b/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetTest.java index f0ed10a542857..190a2622faf15 100644 --- a/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetTest.java @@ -111,7 +111,7 @@ void testIsVoterWithoutDirectoryId() { @Test void testIsOnlyVoterInStandalone() { - Map<Integer, VoterSet.VoterNode> aVoterMap = voterMap(Arrays.asList(1), true); + Map<Integer, VoterSet.VoterNode> aVoterMap = voterMap(Collections.singletonList(1), true); VoterSet voterSet = new VoterSet(new HashMap<>(aVoterMap)); assertTrue(voterSet.isOnlyVoter(aVoterMap.get(1).voterKey())); diff --git a/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java b/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java index 9cc0f967fdb5f..460737a232887 100644 --- a/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java +++ b/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.server.config; -import static java.util.Arrays.asList; import java.util.Arrays; import java.util.Collections; @@ -112,23 +111,23 @@ public static String serverSynonym(String topicConfigName) { } private static Entry<String, List<ConfigSynonym>> sameName(String configName) { - return Utils.mkEntry(configName, asList(new ConfigSynonym(configName))); + return Utils.mkEntry(configName, Collections.singletonList(new ConfigSynonym(configName))); } private static Entry<String, List<ConfigSynonym>> sameNameWithLogPrefix(String configName) { - return Utils.mkEntry(configName, asList(new ConfigSynonym(LOG_PREFIX + configName))); + return Utils.mkEntry(configName, Collections.singletonList(new ConfigSynonym(LOG_PREFIX + configName))); } 
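// [Editorial aside, not part of the patch] The recurring Arrays.asList(x) ->
// Collections.singletonList(x) substitution in this patch is behaviour-preserving
// for the read-only uses above, and slightly cheaper: singletonList allocates no
// backing array and is fully immutable, whereas Arrays.asList returns a fixed-size
// but element-mutable view. A minimal sketch of the one observable difference:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class SingletonListSketch {
        public static void main(String[] args) {
            List<String> fixedSize = Arrays.asList("a");
            fixedSize.set(0, "b");                      // allowed: view over an array

            List<String> immutable = Collections.singletonList("a");
            try {
                immutable.set(0, "b");                  // throws: truly immutable
            } catch (UnsupportedOperationException expected) {
                System.out.println("singletonList rejects set()");
            }
        }
    }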
private static Entry<String, List<ConfigSynonym>> sameNameWithLogCleanerPrefix(String configName) { - return Utils.mkEntry(configName, asList(new ConfigSynonym(LOG_CLEANER_PREFIX + configName))); + return Utils.mkEntry(configName, Collections.singletonList(new ConfigSynonym(LOG_CLEANER_PREFIX + configName))); } private static Entry<String, List<ConfigSynonym>> singleWithLogPrefix(String topicConfigName, String brokerConfigName) { - return Utils.mkEntry(topicConfigName, asList(new ConfigSynonym(LOG_PREFIX + brokerConfigName))); + return Utils.mkEntry(topicConfigName, Collections.singletonList(new ConfigSynonym(LOG_PREFIX + brokerConfigName))); } private static Entry<String, List<ConfigSynonym>> singleWithLogCleanerPrefix(String topicConfigName, String brokerConfigName) { - return Utils.mkEntry(topicConfigName, asList(new ConfigSynonym(LOG_CLEANER_PREFIX + brokerConfigName))); + return Utils.mkEntry(topicConfigName, Collections.singletonList(new ConfigSynonym(LOG_CLEANER_PREFIX + brokerConfigName))); } private static Entry<String, List<ConfigSynonym>> listWithLogPrefix(String topicConfigName, ConfigSynonym... synonyms) { @@ -139,6 +138,6 @@ private static Entry<String, List<ConfigSynonym>> listWithLogPrefix(String topic } private static Entry<String, List<ConfigSynonym>> single(String topicConfigName, String brokerConfigName) { - return Utils.mkEntry(topicConfigName, asList(new ConfigSynonym(brokerConfigName))); + return Utils.mkEntry(topicConfigName, Collections.singletonList(new ConfigSynonym(brokerConfigName))); } } diff --git a/server-common/src/test/java/org/apache/kafka/server/mutable/BoundedListTest.java b/server-common/src/test/java/org/apache/kafka/server/mutable/BoundedListTest.java index df2608430f4ae..70c38c63a7752 100644 --- a/server-common/src/test/java/org/apache/kafka/server/mutable/BoundedListTest.java +++ b/server-common/src/test/java/org/apache/kafka/server/mutable/BoundedListTest.java @@ -21,6 +21,7 @@ import org.junit.jupiter.api.Timeout; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -122,7 +123,7 @@ public void testRemove() { list.remove("a"); assertEquals(Arrays.asList("a", "c"), list); list.remove(0); - assertEquals(Arrays.asList("c"), list); + assertEquals(Collections.singletonList("c"), list); } @Test @@ -132,7 +133,7 @@ public void testClear() { list.add("a"); list.add("c"); list.clear(); - assertEquals(Arrays.asList(), list); + assertEquals(Collections.emptyList(), list); assertTrue(list.isEmpty()); } @@ -205,7 +206,7 @@ public void testSubList() { list.add(1); list.add(2); list.add(3); - assertEquals(Arrays.asList(2), list.subList(1, 2)); + assertEquals(Collections.singletonList(2), list.subList(1, 2)); assertThrows(UnsupportedOperationException.class, () -> list.subList(1, 2).remove(2)); } diff --git a/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java b/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java index 2d76e5df37dbd..df9c600317bb3 100644 --- a/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java +++ b/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java @@ -18,6 +18,7 @@ package org.apache.kafka.server.network; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -47,7 +48,7 @@ final public class EndpointReadyFuturesTest { 1, Arrays.asList(EXTERNAL, INTERNAL), INTERNAL, - Arrays.asList("INTERNAL")); + Collections.singletonList("INTERNAL")); static void assertComplete( EndpointReadyFutures 
readyFutures, diff --git a/server-common/src/test/java/org/apache/kafka/server/util/CommandLineUtilsTest.java b/server-common/src/test/java/org/apache/kafka/server/util/CommandLineUtilsTest.java index e52f39cf846a6..672eb93b65449 100644 --- a/server-common/src/test/java/org/apache/kafka/server/util/CommandLineUtilsTest.java +++ b/server-common/src/test/java/org/apache/kafka/server/util/CommandLineUtilsTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.Properties; @@ -33,14 +34,14 @@ public class CommandLineUtilsTest { @Test public void testParseEmptyArg() { - List<String> argArray = Arrays.asList("my.empty.property="); + List<String> argArray = Collections.singletonList("my.empty.property="); assertThrows(IllegalArgumentException.class, () -> CommandLineUtils.parseKeyValueArgs(argArray, false)); } @Test public void testParseEmptyArgWithNoDelimiter() { - List<String> argArray = Arrays.asList("my.empty.property"); + List<String> argArray = Collections.singletonList("my.empty.property"); assertThrows(IllegalArgumentException.class, () -> CommandLineUtils.parseKeyValueArgs(argArray, false)); } @@ -56,7 +57,7 @@ public void testParseEmptyArgAsValid() { @Test public void testParseSingleArg() { - List<String> argArray = Arrays.asList("my.property=value"); + List<String> argArray = Collections.singletonList("my.property=value"); Properties props = CommandLineUtils.parseKeyValueArgs(argArray); assertEquals(props.getProperty("my.property"), "value", "Value of a single property should be 'value'"); diff --git a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java index 65413ab5cf432..1b4218c0c44e5 100644 --- a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java +++ b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashMapTest.java @@ -18,7 +18,6 @@ package org.apache.kafka.timeline; import java.util.ArrayList; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -96,7 +95,7 @@ public void testMapMethods() { assertNull(map.putIfAbsent(1, "xyz")); assertEquals("xyz", map.putIfAbsent(1, "123")); assertEquals("xyz", map.putIfAbsent(1, "ghi")); - map.putAll(Collections.singletonMap(2, "b")); + map.put(2, "b"); assertTrue(map.containsKey(2)); assertEquals("xyz", map.remove(1)); assertEquals("b", map.remove(2)); diff --git a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java index 070893cdc84be..82c220f5969b4 100644 --- a/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java +++ b/server-common/src/test/java/org/apache/kafka/timeline/TimelineHashSetTest.java @@ -18,6 +18,7 @@ package org.apache.kafka.timeline; import java.util.Arrays; +import java.util.Collections; import org.apache.kafka.common.utils.LogContext; import org.junit.jupiter.api.Test; @@ -61,9 +62,9 @@ public void testIteration() { set.add("d"); assertTrue(set.retainAll(Arrays.asList("a", "b", "c"))); assertFalse(set.retainAll(Arrays.asList("a", "b", "c"))); - assertFalse(set.removeAll(Arrays.asList("d"))); + assertFalse(set.removeAll(Collections.singletonList("d"))); registry.getOrCreateSnapshot(2); - assertTrue(set.removeAll(Arrays.asList("c"))); + assertTrue(set.removeAll(Collections.singletonList("c"))); 
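// [Editorial aside, not part of the patch] The TimelineHashMapTest hunk above
// replaces map.putAll(Collections.singletonMap(2, "b")) with map.put(2, "b").
// For a single mapping the two are equivalent, but the direct put() skips the
// throwaway one-entry map and the copy loop inside putAll(). A minimal sketch:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class PutAllSketch {
        public static void main(String[] args) {
            Map<Integer, String> viaPutAll = new HashMap<>();
            viaPutAll.putAll(Collections.singletonMap(2, "b")); // extra allocation + copy loop

            Map<Integer, String> viaPut = new HashMap<>();
            viaPut.put(2, "b");                                 // one call, same result

            System.out.println(viaPutAll.equals(viaPut));       // true
        }
    }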
assertThat(TimelineHashMapTest.iteratorToList(set.iterator(2)), containsInAnyOrder("a", "b", "c")); assertThat(TimelineHashMapTest.iteratorToList(set.iterator()), diff --git a/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java index 9cd7603f94caa..51b2f6d3cb78d 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java @@ -121,7 +121,6 @@ public int hashCode() { public boolean equals(Object other) { if (!(other instanceof CatCommandHandler)) return false; CatCommandHandler o = (CatCommandHandler) other; - if (!Objects.equals(o.targets, targets)) return false; - return true; + return Objects.equals(o.targets, targets); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java index 71057a4ade75f..ba22c0bb8ecf6 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java @@ -112,7 +112,6 @@ public int hashCode() { public boolean equals(Object other) { if (!(other instanceof CdCommandHandler)) return false; CdCommandHandler o = (CdCommandHandler) other; - if (!o.target.equals(target)) return false; - return true; + return o.target.equals(target); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java index 27cb02a906b7c..e8b8096f8b2d4 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java @@ -52,8 +52,7 @@ public int hashCode() { public boolean equals(Object other) { if (!(other instanceof ErroneousCommandHandler)) return false; ErroneousCommandHandler o = (ErroneousCommandHandler) other; - if (!Objects.equals(o.message, message)) return false; - return true; + return Objects.equals(o.message, message); } @Override diff --git a/shell/src/main/java/org/apache/kafka/shell/command/ExitCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/ExitCommandHandler.java index 56f92de30f318..fab54be2c98be 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/ExitCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/ExitCommandHandler.java @@ -89,7 +89,6 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof ExitCommandHandler)) return false; - return true; + return other instanceof ExitCommandHandler; } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java index a41b0b21ca318..133cb988d01f9 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java @@ -127,7 +127,6 @@ public int hashCode() { public boolean equals(Object other) { if (!(other instanceof FindCommandHandler)) return false; FindCommandHandler o = (FindCommandHandler) other; - if (!Objects.equals(o.paths, paths)) return false; - return true; + return Objects.equals(o.paths, paths); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/HelpCommandHandler.java 
b/shell/src/main/java/org/apache/kafka/shell/command/HelpCommandHandler.java index 52345487b44a5..e0a5aa03288b0 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/HelpCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/HelpCommandHandler.java @@ -89,7 +89,6 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof HelpCommandHandler)) return false; - return true; + return other instanceof HelpCommandHandler; } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java index 848b7bd5185be..e42f7414cf41c 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java @@ -271,8 +271,7 @@ public boolean equals(Object o) { if (!(o instanceof ColumnSchema)) return false; ColumnSchema other = (ColumnSchema) o; if (entriesPerColumn != other.entriesPerColumn) return false; - if (!Arrays.equals(columnWidths, other.columnWidths)) return false; - return true; + return Arrays.equals(columnWidths, other.columnWidths); } @Override @@ -298,7 +297,6 @@ public int hashCode() { public boolean equals(Object other) { if (!(other instanceof LsCommandHandler)) return false; LsCommandHandler o = (LsCommandHandler) other; - if (!Objects.equals(o.targets, targets)) return false; - return true; + return Objects.equals(o.targets, targets); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java index f10e89b2bffef..5892cdff4c5ad 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java @@ -110,7 +110,6 @@ public int hashCode() { public boolean equals(Object other) { if (!(other instanceof ManCommandHandler)) return false; ManCommandHandler o = (ManCommandHandler) other; - if (!o.cmd.equals(cmd)) return false; - return true; + return o.cmd.equals(cmd); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/NoOpCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/NoOpCommandHandler.java index 106d2ddb0147c..e7168127e26c3 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/NoOpCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/NoOpCommandHandler.java @@ -42,7 +42,6 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof NoOpCommandHandler)) return false; - return true; + return other instanceof NoOpCommandHandler; } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/PwdCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/PwdCommandHandler.java index 4a0752a4e701d..55046cf5e7243 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/PwdCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/PwdCommandHandler.java @@ -88,7 +88,6 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof PwdCommandHandler)) return false; - return true; + return other instanceof PwdCommandHandler; } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java index 1489e1f150090..ee937d1c2f5bb 100644 --- 
a/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java @@ -117,7 +117,6 @@ public int hashCode() { public boolean equals(Object other) { if (!(other instanceof TreeCommandHandler)) return false; TreeCommandHandler o = (TreeCommandHandler) other; - if (!Objects.equals(o.targets, targets)) return false; - return true; + return Objects.equals(o.targets, targets); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java b/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java index 23b3a52d77623..6af3011d55946 100644 --- a/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java +++ b/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java @@ -76,8 +76,7 @@ public boolean equals(Object o) { if (!(o instanceof MetadataNodeInfo)) return false; MetadataNodeInfo other = (MetadataNodeInfo) o; if (!Arrays.equals(path, other.path)) return false; - if (!node.equals(other.node)) return false; - return true; + return node.equals(other.node); } @Override diff --git a/shell/src/test/java/org/apache/kafka/shell/command/CommandTest.java b/shell/src/test/java/org/apache/kafka/shell/command/CommandTest.java index 212ac11e69b5c..58528335c4be8 100644 --- a/shell/src/test/java/org/apache/kafka/shell/command/CommandTest.java +++ b/shell/src/test/java/org/apache/kafka/shell/command/CommandTest.java @@ -30,39 +30,39 @@ public class CommandTest { @Test public void testParseCommands() { - assertEquals(new CatCommandHandler(Arrays.asList("foo")), + assertEquals(new CatCommandHandler(Collections.singletonList("foo")), new Commands(true).parseCommand(Arrays.asList("cat", "foo"))); assertEquals(new CdCommandHandler(Optional.empty()), - new Commands(true).parseCommand(Arrays.asList("cd"))); + new Commands(true).parseCommand(Collections.singletonList("cd"))); assertEquals(new CdCommandHandler(Optional.of("foo")), new Commands(true).parseCommand(Arrays.asList("cd", "foo"))); assertEquals(new ExitCommandHandler(), - new Commands(true).parseCommand(Arrays.asList("exit"))); + new Commands(true).parseCommand(Collections.singletonList("exit"))); assertEquals(new HelpCommandHandler(), - new Commands(true).parseCommand(Arrays.asList("help"))); + new Commands(true).parseCommand(Collections.singletonList("help"))); assertEquals(new HistoryCommandHandler(3), new Commands(true).parseCommand(Arrays.asList("history", "3"))); assertEquals(new HistoryCommandHandler(Integer.MAX_VALUE), - new Commands(true).parseCommand(Arrays.asList("history"))); + new Commands(true).parseCommand(Collections.singletonList("history"))); assertEquals(new LsCommandHandler(Collections.emptyList()), - new Commands(true).parseCommand(Arrays.asList("ls"))); + new Commands(true).parseCommand(Collections.singletonList("ls"))); assertEquals(new LsCommandHandler(Arrays.asList("abc", "123")), new Commands(true).parseCommand(Arrays.asList("ls", "abc", "123"))); assertEquals(new PwdCommandHandler(), - new Commands(true).parseCommand(Arrays.asList("pwd"))); + new Commands(true).parseCommand(Collections.singletonList("pwd"))); } @Test public void testParseInvalidCommand() { assertEquals(new ErroneousCommandHandler("invalid choice: 'blah' (choose " + "from 'cat', 'cd', 'exit', 'find', 'help', 'history', 'ls', 'man', 'pwd', 'tree')"), - new Commands(true).parseCommand(Arrays.asList("blah"))); + new Commands(true).parseCommand(Collections.singletonList("blah"))); } @Test public void testEmptyCommandLine() { 
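// [Editorial aside, not part of the patch] The shell command handlers above all
// collapse the `if (!cond) return false; return true;` tail of equals() into
// `return cond;`, and the stateless handlers reduce further to
// `return other instanceof X;`. The rewrite is purely mechanical; a sketch with
// a hypothetical HandlerSketch class:

    class HandlerSketch {
        private final String target;
        HandlerSketch(String target) { this.target = target; }

        @Override
        public boolean equals(Object other) {
            if (!(other instanceof HandlerSketch)) return false;
            HandlerSketch o = (HandlerSketch) other;
            // Before the cleanup this read:
            //   if (!o.target.equals(target)) return false;
            //   return true;
            return o.target.equals(target);
        }

        @Override
        public int hashCode() { return target.hashCode(); }
    }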
assertEquals(new NoOpCommandHandler(), - new Commands(true).parseCommand(Arrays.asList(""))); + new Commands(true).parseCommand(Collections.singletonList(""))); assertEquals(new NoOpCommandHandler(), new Commands(true).parseCommand(Collections.emptyList())); } diff --git a/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java b/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java index 000d8f2f01775..2bb74910f5ffc 100644 --- a/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java +++ b/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java @@ -29,6 +29,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -137,8 +138,8 @@ public void testDotDot() { InfoConsumer consumer = new InfoConsumer(); GlobVisitor visitor = new GlobVisitor("..", consumer); visitor.accept(DATA); - assertEquals(Optional.of(Arrays.asList( - new MetadataNodeInfo(new String[0], DATA.root()))), consumer.infos); + assertEquals(Optional.of(Collections.singletonList( + new MetadataNodeInfo(new String[0], DATA.root()))), consumer.infos); } @Test @@ -146,8 +147,8 @@ public void testDoubleDotDot() { InfoConsumer consumer = new InfoConsumer(); GlobVisitor visitor = new GlobVisitor("../..", consumer); visitor.accept(DATA); - assertEquals(Optional.of(Arrays.asList( - new MetadataNodeInfo(new String[0], DATA.root()))), consumer.infos); + assertEquals(Optional.of(Collections.singletonList( + new MetadataNodeInfo(new String[0], DATA.root()))), consumer.infos); } @Test @@ -189,8 +190,8 @@ public void testAbsoluteGlob() { InfoConsumer consumer = new InfoConsumer(); GlobVisitor visitor = new GlobVisitor("/a?pha", consumer); visitor.accept(DATA); - assertEquals(Optional.of(Arrays.asList( - new MetadataNodeInfo(new String[] {"alpha"}, - DATA.root().child("alpha")))), consumer.infos); + assertEquals(Optional.of(Collections.singletonList( + new MetadataNodeInfo(new String[]{"alpha"}, + DATA.root().child("alpha")))), consumer.infos); } } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java index 9617da17c6593..83884e6ce3da6 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java @@ -76,10 +76,7 @@ public boolean matches(final LocalTieredStorageCondition condition) { if (condition.baseOffset != null && !metadata.isPresent()) { return false; } - if (condition.baseOffset != null && metadata.get().startOffset() != condition.baseOffset) { - return false; - } - return true; + return condition.baseOffset == null || metadata.get().startOffset() == condition.baseOffset; } /** diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageTest.java index de6e8d2692921..2908837fa4d2c 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageTest.java @@ -304,7 +304,7 @@ public void traverseMultipleOffloadedRecordsInOneSegment() throws RemoteStorageE final LocalTieredStorageSnapshot snapshot = 
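The LocalTieredStorageEvent.matches rewrite above folds the second early return into the final expression. This is safe because the first guard already returned false when a base offset is expected but metadata is absent, so the get() call is only reached when the value is present. A standalone sketch of the same folding over plain values (names are illustrative, not from the patch):

import java.util.Optional;

public class GuardFoldingDemo {
    // Before: two separate early returns plus a trailing "return true".
    static boolean matchesBefore(Long baseOffset, Optional<Long> startOffset) {
        if (baseOffset != null && !startOffset.isPresent()) {
            return false;
        }
        if (baseOffset != null && startOffset.get() != baseOffset.longValue()) {
            return false;
        }
        return true;
    }

    // After: the second guard, inverted, becomes the return expression.
    static boolean matchesAfter(Long baseOffset, Optional<Long> startOffset) {
        if (baseOffset != null && !startOffset.isPresent()) {
            return false;
        }
        return baseOffset == null || startOffset.get() == baseOffset.longValue();
    }

    public static void main(String[] args) {
        // The two forms agree on all three interesting cases.
        System.out.println(matchesBefore(5L, Optional.of(5L)) == matchesAfter(5L, Optional.of(5L)));
        System.out.println(matchesBefore(5L, Optional.of(6L)) == matchesAfter(5L, Optional.of(6L)));
        System.out.println(matchesBefore(null, Optional.empty()) == matchesAfter(null, Optional.empty()));
    }
}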
takeSnapshot(tieredStorage); - assertEquals(asList(topicPartition), snapshot.getTopicPartitions()); + assertEquals(Collections.singletonList(topicPartition), snapshot.getTopicPartitions()); assertEquals(asList(wrap(record1), wrap(record2)), extractRecordsValue(snapshot, id)); } @@ -331,7 +331,7 @@ public void traverseMultipleOffloadedRecordsInTwoSegments() throws RemoteStorage actual.put(idA, extractRecordsValue(snapshot, idA)); actual.put(idB, extractRecordsValue(snapshot, idB)); - assertEquals(asList(topicPartition), snapshot.getTopicPartitions()); + assertEquals(Collections.singletonList(topicPartition), snapshot.getTopicPartitions()); assertEquals(expected, actual); } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManagerTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManagerTest.java index 5a71a6a51ba10..3522ea6a81a63 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManagerTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManagerTest.java @@ -45,7 +45,7 @@ public class RemoteLogMetadataManagerTest { private final Time time = new MockTime(1); - private RemoteLogMetadataManager remoteLogMetadataManager = new TopicBasedRemoteLogMetadataManagerWrapperWithHarness(); + private final RemoteLogMetadataManager remoteLogMetadataManager = new TopicBasedRemoteLogMetadataManagerWrapperWithHarness(); @Test public void testFetchSegments() throws Exception { diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java index 902b5c2d7132a..736a9cc4dbec1 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java @@ -111,13 +111,10 @@ private boolean matches(R1 expected, R2 actual, Description mismatchDescription) .appendValue(actual.getClass().getSimpleName()); return false; } - if (!compare(expectedRecord.key(), actualRecord.key(), keySerde.deserializer(), "Record key", - mismatchDescription) || - !compare(expectedRecord.value(), actualRecord.value(), valueSerde.deserializer(), "Record value", - mismatchDescription)) { - return false; - } - return true; + return compare(expectedRecord.key(), actualRecord.key(), keySerde.deserializer(), "Record key", + mismatchDescription) && + compare(expectedRecord.value(), actualRecord.value(), valueSerde.deserializer(), "Record value", + mismatchDescription); } private boolean compare(ByteBuffer lhs, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicConfig.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicConfig.java index 0b5a32799d8e9..48c0a66241733 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicConfig.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicConfig.java @@ -82,8 +82,7 @@ public Optional numberOfPartitions() { public void setNumberOfPartitions(final int numberOfPartitions) { if (hasEnforcedNumberOfPartitions()) { - throw new UnsupportedOperationException("number of partitions are enforced on topic " + - "" + name() + " and can't be altered."); + throw new UnsupportedOperationException("number of partitions are enforced on topic " + name() + " 
and can't be altered."); } validateNumberOfPartitions(numberOfPartitions); diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java index 6f508eff2792a..7bc193ca5a08e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/SinkNode.java @@ -108,11 +108,9 @@ public String toString() { */ @Override public String toString(final String indent) { - final StringBuilder sb = new StringBuilder(super.toString(indent)); - sb.append(indent).append("\ttopic:\t\t"); - sb.append(topicExtractor); - sb.append("\n"); - return sb.toString(); + return super.toString(indent) + indent + "\ttopic:\t\t" + + topicExtractor + + "\n"; } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java index 0ceb611bfa92e..7efa927364a88 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java @@ -77,13 +77,11 @@ public String toString() { } public String toString(final String indent) { - final StringBuilder builder = new StringBuilder(); - builder.append(indent).append("GlobalMetadata: ").append(allMetadata).append("\n"); - builder.append(indent).append("GlobalStores: ").append(globalStores).append("\n"); - builder.append(indent).append("My HostInfo: ").append(thisHost).append("\n"); - builder.append(indent).append("PartitionsByTopic: ").append(partitionsByTopic).append("\n"); - return builder.toString(); + return indent + "GlobalMetadata: " + allMetadata + "\n" + + indent + "GlobalStores: " + globalStores + "\n" + + indent + "My HostInfo: " + thisHost + "\n" + + indent + "PartitionsByTopic: " + partitionsByTopic + "\n"; } /** diff --git a/streams/src/main/java/org/apache/kafka/streams/state/HostInfo.java b/streams/src/main/java/org/apache/kafka/streams/state/HostInfo.java index c25f1840e52bf..8daef3e03d0ef 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/HostInfo.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/HostInfo.java @@ -105,7 +105,7 @@ public int port() { @Override public String toString() { return "HostInfo{" + - "host=\'" + host + '\'' + + "host='" + host + '\'' + ", port=" + port + '}'; } diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/EosV2UpgradeIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/EosV2UpgradeIntegrationTest.java index bac9ae37eb8e7..6a846eb972fbf 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/EosV2UpgradeIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/EosV2UpgradeIntegrationTest.java @@ -161,7 +161,7 @@ public static void closeCluster() { private final AtomicInteger commitRequested = new AtomicInteger(0); private int testNumber = 0; - private Map exceptionCounts = new HashMap() { + private final Map exceptionCounts = new HashMap() { { put(APP_DIR_1, 0); put(APP_DIR_2, 0); diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java index edfc27e458f08..8a79bce2c477c 100644 --- 
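The RecordsKeyValueMatcher hunk a little further up rewrites "if (!A || !B) return false; return true;" as "return A && B;", the same truth table by De Morgan's law, with short-circuit evaluation preserving the order of the two compare calls. A standalone sketch (compare here is a stand-in for the matcher's private helper):

public class ConjunctionReturnDemo {
    // Stand-in for the matcher's compare(...) helper.
    static boolean compare(String expected, String actual) {
        return expected == null ? actual == null : expected.equals(actual);
    }

    // Before: if (!compare(k1, k2) || !compare(v1, v2)) return false; return true;
    // After: identical truth table; the key comparison is still evaluated first,
    // and the value comparison is skipped when the keys already mismatch.
    static boolean matches(String k1, String k2, String v1, String v2) {
        return compare(k1, k2) && compare(v1, v2);
    }

    public static void main(String[] args) {
        System.out.println(matches("k", "k", "v", "v")); // true
        System.out.println(matches("k", "x", "v", "v")); // false
    }
}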
a/streams/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java @@ -313,7 +313,7 @@ public String name() { public KeyValueStore get() { return new KeyValueStore() { private boolean open = false; - private Map map = new HashMap<>(); + private final Map map = new HashMap<>(); private Position position; private StateStoreContext context; diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/JoinGracePeriodDurabilityIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/JoinGracePeriodDurabilityIntegrationTest.java index 6fa378cb4a739..ce25e89ae358a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/JoinGracePeriodDurabilityIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/JoinGracePeriodDurabilityIntegrationTest.java @@ -54,6 +54,7 @@ import java.time.Duration; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.Properties; @@ -205,9 +206,9 @@ public void shouldRecoverBufferAfterShutdown() { // flush those recovered buffered events out. produceSynchronouslyToPartitionZero( streamInput, - asList( - new KeyValueTimestamp<>("k6", "v6", scaledTime(20L)) - ) + Collections.singletonList( + new KeyValueTimestamp<>("k6", "v6", scaledTime(20L)) + ) ); verifyOutput( output, diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/KTableEfficientRangeQueryTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/KTableEfficientRangeQueryTest.java index 571bbf95ed03b..3ec8c351d2f97 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/KTableEfficientRangeQueryTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/KTableEfficientRangeQueryTest.java @@ -71,12 +71,12 @@ private enum StoreType { InMemory, RocksDB, Timed } private static final String TABLE_NAME = "mytable"; private static final int DATA_SIZE = 5; - private StoreType storeType; - private boolean enableLogging; - private boolean enableCaching; - private boolean forward; + private final StoreType storeType; + private final boolean enableLogging; + private final boolean enableCaching; + private final boolean forward; - private LinkedList> records; + private final LinkedList> records; private String low; private String high; private String middle; diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java index 6ed47c6c41dbd..4c58b21450007 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java @@ -18,6 +18,7 @@ package org.apache.kafka.streams.integration; import java.io.IOException; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Properties; @@ -131,8 +132,8 @@ public static void startCluster() throws IOException, InterruptedException { new KeyValue<>("ID123-4", "ID123-A4") ); - final List> table2 = asList( - new KeyValue<>("ID123", "BBB") + final List> table2 = Collections.singletonList( + new 
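Several hunks in this stretch (EosV2UpgradeIntegrationTest, IQv2IntegrationTest, KTableEfficientRangeQueryTest) mark fields final because they are assigned exactly once. final freezes only the reference, not the object, and buys compile-time protection against accidental reassignment plus safe publication under the Java memory model. A small sketch:

import java.util.HashMap;
import java.util.Map;

public class FinalFieldDemo {
    // Assigned once at construction and never rebound: mark it final.
    // The map itself stays fully mutable; final only freezes the reference.
    private final Map<String, String> map = new HashMap<>();

    public void put(String k, String v) {
        map.put(k, v);            // fine: mutating the object
        // map = new HashMap<>(); // would no longer compile
    }

    public static void main(String[] args) {
        FinalFieldDemo d = new FinalFieldDemo();
        d.put("key", "value");
        System.out.println(d.map); // {key=value}
    }
}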
KeyValue<>("ID123", "BBB") ); IntegrationTestUtils.produceKeyValuesSynchronously(TABLE_1, table1, PRODUCER_CONFIG_1, MOCK_TIME); diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java index cb5a3da163175..ae12f261003bb 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/RegexSourceIntegrationTest.java @@ -122,12 +122,12 @@ public static void closeCluster() { private Properties streamsConfiguration; private static final String STREAM_TASKS_NOT_UPDATED = "Stream tasks not updated"; private KafkaStreams streams; - private static volatile AtomicInteger topicSuffixGenerator = new AtomicInteger(0); + private static final AtomicInteger TOPIC_SUFFIX_GENERATOR = new AtomicInteger(0); private String outputTopic; @BeforeEach public void setUp(final TestInfo testInfo) throws InterruptedException { - outputTopic = createTopic(topicSuffixGenerator.incrementAndGet()); + outputTopic = createTopic(TOPIC_SUFFIX_GENERATOR.incrementAndGet()); final Properties properties = new Properties(); properties.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0); properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L); diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/SelfJoinUpgradeIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/SelfJoinUpgradeIntegrationTest.java index 21bcb609541b6..301fbaafcffd8 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/SelfJoinUpgradeIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/SelfJoinUpgradeIntegrationTest.java @@ -136,10 +136,10 @@ public void shouldUpgradeWithTopologyOptimizationOff() throws Exception { final long currentTime = CLUSTER.time.milliseconds(); processKeyValueAndVerifyCount( - "1", - "A", - currentTime + 42L, - asList(new KeyValueTimestamp<>("1", "AA", currentTime + 42L)) + "1", + "A", + currentTime + 42L, + singletonList(new KeyValueTimestamp<>("1", "AA", currentTime + 42L)) ); processKeyValueAndVerifyCount( @@ -201,10 +201,10 @@ public void shouldRestartWithTopologyOptimizationOn() throws Exception { final long currentTime = CLUSTER.time.milliseconds(); processKeyValueAndVerifyCount( - "1", - "A", - currentTime + 42L, - asList(new KeyValueTimestamp<>("1", "AA", currentTime + 42L)) + "1", + "A", + currentTime + 42L, + singletonList(new KeyValueTimestamp<>("1", "AA", currentTime + 42L)) ); processKeyValueAndVerifyCount( diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/StreamStreamJoinIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/StreamStreamJoinIntegrationTest.java index b1d2390dc8354..6121658932e4a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/StreamStreamJoinIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/StreamStreamJoinIntegrationTest.java @@ -205,38 +205,38 @@ public void testLeft() { STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appID + "-left"); final List>> expectedResult = Arrays.asList( - null, - null, - null, - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", null, 5L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), - new 
TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), - null, - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), - Arrays.asList( - new TestRecord<>(null, "E-null", null, 16L)), - null + null, + null, + null, + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", null, 5L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), + null, + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), + Collections.singletonList( + new TestRecord<>(null, "E-null", null, 16L)), + null ); leftStream.leftJoin( @@ -253,38 +253,38 @@ public void testLeftRepartitioned() { STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appID + "-left-repartitioned"); final List>> expectedResult = Arrays.asList( - null, - null, - null, - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", null, 5L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), - null, - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), - Arrays.asList( - new TestRecord<>(null, "E-null", null, 16L)), - null + null, + null, + null, + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", 
null, 5L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), + null, + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), + Collections.singletonList( + new TestRecord<>(null, "E-null", null, 16L)), + null ); leftStream.map(MockMapper.noOpKeyValueMapper()) @@ -303,39 +303,39 @@ public void testOuter() { STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appID + "-outer"); final List>> expectedResult = Arrays.asList( - null, - null, - null, - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", null, 5L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), - null, - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), - Arrays.asList( - new TestRecord<>(null, "E-null", null, 16L)), - Arrays.asList( - new TestRecord<>(null, "null-e", null, 17L)) + null, + null, + null, + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", null, 5L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), + null, + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), + Collections.singletonList( + new TestRecord<>(null, "E-null", null, 16L)), + 
Collections.singletonList( + new TestRecord<>(null, "null-e", null, 17L)) ); leftStream.outerJoin( @@ -352,39 +352,39 @@ public void testOuterRepartitioned() { STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appID + "-outer"); final List>> expectedResult = Arrays.asList( - null, - null, - null, - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), - Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", null, 5L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), - null, - null, - null, - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), - new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), - Arrays.asList( - new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), - new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), - Arrays.asList( - new TestRecord<>(null, "E-null", null, 16L)), - Arrays.asList( - new TestRecord<>(null, "null-e", null, 17L)) + null, + null, + null, + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "A-a", null, 4L)), + Collections.singletonList(new TestRecord<>(ANY_UNIQUE_KEY, "B-a", null, 5L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-b", null, 6L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-b", null, 6L)), + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "C-a", null, 9L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-b", null, 9L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-c", null, 10L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-c", null, 10L)), + null, + null, + null, + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "A-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "B-d", null, 14L), + new TestRecord<>(ANY_UNIQUE_KEY, "C-d", null, 14L)), + Arrays.asList( + new TestRecord<>(ANY_UNIQUE_KEY, "D-a", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-b", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-c", null, 15L), + new TestRecord<>(ANY_UNIQUE_KEY, "D-d", null, 15L)), + Collections.singletonList( + new TestRecord<>(null, "E-null", null, 16L)), + Collections.singletonList( + new TestRecord<>(null, "null-e", null, 17L)) ); leftStream.map(MockMapper.noOpKeyValueMapper()) diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/StreamsUncaughtExceptionHandlerIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/StreamsUncaughtExceptionHandlerIntegrationTest.java index 63bb1a15b0755..4687d01cb4567 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/StreamsUncaughtExceptionHandlerIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/StreamsUncaughtExceptionHandlerIntegrationTest.java @@ -105,7 +105,7 @@ public static void closeCluster() { private final String outputTopic2 = "output2" + testId; private final StreamsBuilder builder = new StreamsBuilder(); private final List processorValueCollector = new ArrayList<>(); - private static AtomicBoolean throwError = new 
AtomicBoolean(true); + private static final AtomicBoolean THROW_ERROR = new AtomicBoolean(true); private final Properties properties = basicProps(); @@ -328,10 +328,10 @@ private static class ShutdownProcessor extends org.apache.kafka.streams.processo @Override public void process(final String key, final String value) { valueList.add(value + " " + context.taskId()); - if (throwError.get()) { + if (THROW_ERROR.get()) { throw new StreamsException(Thread.currentThread().getName()); } - throwError.set(true); + THROW_ERROR.set(true); } } @@ -364,7 +364,7 @@ private void testReplaceThreads(final int numThreads) throws Exception { final AtomicInteger count = new AtomicInteger(); kafkaStreams.setUncaughtExceptionHandler(exception -> { if (count.incrementAndGet() == numThreads) { - throwError.set(false); + THROW_ERROR.set(false); } return REPLACE_THREAD; }); @@ -372,7 +372,7 @@ private void testReplaceThreads(final int numThreads) throws Exception { produceMessages(0L, inputTopic, "A"); TestUtils.waitForCondition(() -> count.get() == numThreads, "finished replacing threads"); - TestUtils.waitForCondition(() -> throwError.get(), "finished replacing threads"); + TestUtils.waitForCondition(() -> THROW_ERROR.get(), "finished replacing threads"); kafkaStreams.close(); waitForApplicationState(Collections.singletonList(kafkaStreams), KafkaStreams.State.NOT_RUNNING, DEFAULT_DURATION); diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/TaskMetadataIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/TaskMetadataIntegrationTest.java index 62d3758c86c39..04bfae41632a1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/TaskMetadataIntegrationTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/TaskMetadataIntegrationTest.java @@ -81,7 +81,7 @@ public static void closeCluster() { private String inputTopic; private static StreamsBuilder builder; private static Properties properties; - private static String appIdPrefix = "TaskMetadataTest_"; + private static final String APP_ID_PREFIX = "TaskMetadataTest_"; private static String appId; private AtomicBoolean process; private AtomicBoolean commit; @@ -89,7 +89,7 @@ public static void closeCluster() { @Before public void setup() { final String testId = safeUniqueTestName(testName); - appId = appIdPrefix + testId; + appId = APP_ID_PREFIX + testId; inputTopic = "input" + testId; IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, inputTopic); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/PrintedTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/PrintedTest.java index beb0a1f13de26..65cb8a1952f30 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/PrintedTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/PrintedTest.java @@ -69,7 +69,7 @@ public void shouldCreateProcessorThatPrintsToFile() throws IOException { try (final InputStream stream = Files.newInputStream(file.toPath())) { final byte[] data = new byte[stream.available()]; stream.read(data); - assertThat(new String(data, StandardCharsets.UTF_8.name()), equalTo("[processor]: hi, 1\n")); + assertThat(new String(data, StandardCharsets.UTF_8), equalTo("[processor]: hi, 1\n")); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java index 81a2f1daf5bfb..ad6c32d931c52 100644 --- 
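The PrintedTest hunk above switches from new String(data, StandardCharsets.UTF_8.name()) to the Charset overload. Passing the Charset object skips a lookup by name and, unlike the String overload, cannot throw the checked UnsupportedEncodingException. Sketch:

import java.nio.charset.StandardCharsets;

public class CharsetOverloadDemo {
    public static void main(String[] args) throws Exception {
        byte[] data = "hi, 1".getBytes(StandardCharsets.UTF_8);

        // String overload: resolves the charset by name and declares the
        // checked UnsupportedEncodingException, forcing callers to handle it.
        String byName = new String(data, StandardCharsets.UTF_8.name());

        // Charset overload: no checked exception, no name lookup.
        String byCharset = new String(data, StandardCharsets.UTF_8);

        System.out.println(byName.equals(byCharset)); // true
    }
}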
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java @@ -1005,10 +1005,10 @@ public void shouldNotAllowNullStreamJoinedOnOuterJoin() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.outerJoin( - testStream, - MockValueJoiner.TOSTRING_JOINER, - JoinWindows.of(ofMillis(10)), - (StreamJoined) null)); + testStream, + MockValueJoiner.TOSTRING_JOINER, + JoinWindows.of(ofMillis(10)), + null)); assertThat(exception.getMessage(), equalTo("streamJoined can't be null")); } @@ -1524,7 +1524,7 @@ public void shouldSendDataToDynamicTopics() { final StreamsBuilder builder = new StreamsBuilder(); final String input = "topic"; final KStream stream = builder.stream(input, stringConsumed); - stream.to((key, value, context) -> context.topic() + "-" + key + "-" + value.substring(0, 1), + stream.to((key, value, context) -> context.topic() + "-" + key + "-" + value.charAt(0), Produced.with(Serdes.String(), Serdes.String())); builder.stream(input + "-a-v", stringConsumed).process(processorSupplier); builder.stream(input + "-b-v", stringConsumed).process(processorSupplier); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSplitTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSplitTest.java index 29eaf1aea9e04..a025bfe413a6b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSplitTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSplitTest.java @@ -35,6 +35,7 @@ import org.junit.Test; import java.util.Arrays; +import java.util.Collections; import java.util.Map; import java.util.Properties; import java.util.function.Consumer; @@ -70,8 +71,8 @@ public void testKStreamSplit() { final TestOutputTopic x3 = driver.createOutputTopic("x3", new IntegerDeserializer(), new StringDeserializer()); final TestOutputTopic x5 = driver.createOutputTopic("x5", new IntegerDeserializer(), new StringDeserializer()); assertEquals(Arrays.asList("V0", "V2", "V4", "V6"), x2.readValuesToList()); - assertEquals(Arrays.asList("V3"), x3.readValuesToList()); - assertEquals(Arrays.asList("V5"), x5.readValuesToList()); + assertEquals(Collections.singletonList("V3"), x3.readValuesToList()); + assertEquals(Collections.singletonList("V5"), x5.readValuesToList()); }); } @@ -127,9 +128,9 @@ public void testResultingMap() { final TestOutputTopic x7 = driver.createOutputTopic("foo-5", new IntegerDeserializer(), new StringDeserializer()); final TestOutputTopic defaultBranch = driver.createOutputTopic("foo-0", new IntegerDeserializer(), new StringDeserializer()); assertEquals(Arrays.asList("V0", "V2", "V4", "V6"), even.readValuesToList()); - assertEquals(Arrays.asList("V-1"), negative.readValuesToList()); - assertEquals(Arrays.asList("V7"), x7.readValuesToList()); - assertEquals(Arrays.asList("V1"), defaultBranch.readValuesToList()); + assertEquals(Collections.singletonList("V-1"), negative.readValuesToList()); + assertEquals(Collections.singletonList("V7"), x7.readValuesToList()); + assertEquals(Collections.singletonList("V1"), defaultBranch.readValuesToList()); }); } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/UnlimitedWindowTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/UnlimitedWindowTest.java index f8e573133308e..b9cdf6ec7c573 100644 --- 
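The KStreamImplTest hunk above replaces value.substring(0, 1) with value.charAt(0) inside a topic-name concatenation. charAt avoids allocating a one-character String; because the char participates in a String concatenation, the resulting topic name is unchanged. Sketch:

public class CharAtDemo {
    public static void main(String[] args) {
        String topic = "topic", key = "a", value = "v1";

        String viaSubstring = topic + "-" + key + "-" + value.substring(0, 1);
        String viaCharAt = topic + "-" + key + "-" + value.charAt(0);

        // Both build "topic-a-v"; charAt just skips the temporary String.
        System.out.println(viaSubstring.equals(viaCharAt)); // true
    }
}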
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/UnlimitedWindowTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/UnlimitedWindowTest.java @@ -23,7 +23,7 @@ public class UnlimitedWindowTest { - private long start = 50; + private final long start = 50; private final UnlimitedWindow window = new UnlimitedWindow(start); private final SessionWindow sessionWindow = new SessionWindow(start, start); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java index dc87c1eacc078..d696696bbc6bb 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java @@ -34,12 +34,12 @@ public class WindowedStreamPartitionerTest { - private String topicName = "topic"; + private final String topicName = "topic"; - private IntegerSerializer intSerializer = new IntegerSerializer(); - private StringSerializer stringSerializer = new StringSerializer(); + private final IntegerSerializer intSerializer = new IntegerSerializer(); + private final StringSerializer stringSerializer = new StringSerializer(); - private List infos = Arrays.asList( + private final List infos = Arrays.asList( new PartitionInfo(topicName, 0, Node.noNode(), new Node[0], new Node[0]), new PartitionInfo(topicName, 1, Node.noNode(), new Node[0], new Node[0]), new PartitionInfo(topicName, 2, Node.noNode(), new Node[0], new Node[0]), @@ -48,7 +48,7 @@ public class WindowedStreamPartitionerTest { new PartitionInfo(topicName, 5, Node.noNode(), new Node[0], new Node[0]) ); - private Cluster cluster = new Cluster("cluster", Collections.singletonList(Node.noNode()), infos, + private final Cluster cluster = new Cluster("cluster", Collections.singletonList(Node.noNode()), infos, Collections.emptySet(), Collections.emptySet()); @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java index 7054724087f33..1cb6d86c89d10 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java @@ -109,7 +109,7 @@ class DefaultStateUpdaterTest { private final StreamsConfig config = new StreamsConfig(configProps(COMMIT_INTERVAL)); private final ChangelogReader changelogReader = mock(ChangelogReader.class); private final TopologyMetadata topologyMetadata = unnamedTopology().build(); - private DefaultStateUpdater stateUpdater = + private final DefaultStateUpdater stateUpdater = new DefaultStateUpdater("test-state-updater", metrics, config, null, changelogReader, topologyMetadata, time); @AfterEach diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java index 03585095ea469..781574b897cab 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java @@ -58,7 +58,6 @@ import org.junit.runner.RunWith; import 
org.mockito.junit.MockitoJUnitRunner; -import java.util.Arrays; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -357,8 +356,8 @@ public void shouldThrowTimeoutExceptionIfGetNumPartitionsHasTopicDescriptionTime final InternalTopicManager internalTopicManager = new InternalTopicManager(time, mockAdminClient, new StreamsConfig(config)); try { - final Set topic1set = new HashSet(Arrays.asList(topic1)); - final Set topic2set = new HashSet(Arrays.asList(topic2)); + final Set topic1set = new HashSet(Collections.singletonList(topic1)); + final Set topic2set = new HashSet(Collections.singletonList(topic2)); internalTopicManager.getNumPartitions(topic1set, topic2set); @@ -369,8 +368,8 @@ public void shouldThrowTimeoutExceptionIfGetNumPartitionsHasTopicDescriptionTime mockAdminClient.timeoutNextRequest(1); try { - final Set topic1set = new HashSet(Arrays.asList(topic1)); - final Set topic2set = new HashSet(Arrays.asList(topic2)); + final Set topic1set = new HashSet(Collections.singletonList(topic1)); + final Set topic2set = new HashSet(Collections.singletonList(topic2)); internalTopicManager.getNumPartitions(topic1set, topic2set); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/MockChangelogReader.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/MockChangelogReader.java index 8d0f8c7a6b094..49d18d888ed6d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/MockChangelogReader.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/MockChangelogReader.java @@ -27,7 +27,7 @@ public class MockChangelogReader implements ChangelogReader { private final Set restoringPartitions = new HashSet<>(); - private Map restoredOffsets = Collections.emptyMap(); + private final Map restoredOffsets = Collections.emptyMap(); public boolean isPartitionRegistered(final TopicPartition partition) { return restoringPartitions.contains(partition); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java index 8de868f0e828a..c4ea4fa76584c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java @@ -1123,7 +1123,7 @@ public void shouldLoadMissingFileAsEmptyPosition() { } public static class StateStorePositionCommit implements CommitCallback { - private File file; + private final File file; private final OffsetCheckpoint checkpointFile; private final Position position; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java index 0e12cdb49d957..a011d66310789 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java @@ -65,7 +65,7 @@ public class StateManagerUtilTest { @Mock private InternalProcessorContext processorContext; - private Logger logger = new LogContext("test").logger(AbstractTask.class); + private final Logger logger = new LogContext("test").logger(AbstractTask.class); private final TaskId taskId = new TaskId(0, 0); diff --git 
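The InternalTopicManagerTest hunks above keep a mutable HashSet but seed it from a singletonList. As an aside beyond this cleanup patch: if a set like this were never mutated afterwards, Collections.singleton would be an even smaller step. The sketch below shows both forms, with hypothetical topic names:

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SingleElementSetDemo {
    public static void main(String[] args) {
        // As in the patch: a mutable set seeded with one element.
        Set<String> mutable = new HashSet<>(Collections.singletonList("topic-1"));
        mutable.add("topic-2"); // still allowed

        // Alternative when no mutation follows: an immutable one-element set.
        Set<String> immutable = Collections.singleton("topic-1");

        System.out.println(mutable.contains("topic-1") && immutable.contains("topic-1")); // true
    }
}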
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/AssignmentTestUtils.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/AssignmentTestUtils.java index 1241ef03fcf22..4b482c6613b43 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/AssignmentTestUtils.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/AssignmentTestUtils.java @@ -372,16 +372,15 @@ static void assertValidAssignment(final int numStandbyReplicas, if (!misassigned.isEmpty()) { assertThat( - new StringBuilder().append("Found some over- or under-assigned tasks in the final assignment with ") - .append(numStandbyReplicas) - .append(" and max warmups ") - .append(maxWarmupReplicas) - .append(" standby replicas, stateful tasks:") - .append(statefulTasks) - .append(", and stateless tasks:") - .append(statelessTasks) - .append(failureContext) - .toString(), + "Found some over- or under-assigned tasks in the final assignment with " + + numStandbyReplicas + + " and max warmups " + + maxWarmupReplicas + + " standby replicas, stateful tasks:" + + statefulTasks + + ", and stateless tasks:" + + statelessTasks + + failureContext, misassigned, is(emptyMap())); } @@ -395,27 +394,25 @@ private static void validateAndAddStandbyAssignments(final Set statefulT for (final TaskId standbyTask : entry.getValue().standbyTasks()) { if (statelessTasks.contains(standbyTask)) { throw new AssertionError( - new StringBuilder().append("Found a standby task for stateless task ") - .append(standbyTask) - .append(" on client ") - .append(entry) - .append(" stateless tasks:") - .append(statelessTasks) - .append(failureContext) - .toString() + "Found a standby task for stateless task " + + standbyTask + + " on client " + + entry + + " stateless tasks:" + + statelessTasks + + failureContext ); } else if (assignments.containsKey(standbyTask)) { assignments.get(standbyTask).add(entry.getKey()); } else { throw new AssertionError( - new StringBuilder().append("Found an extra standby task ") - .append(standbyTask) - .append(" on client ") - .append(entry) - .append(" but expected stateful tasks:") - .append(statefulTasks) - .append(failureContext) - .toString() + "Found an extra standby task " + + standbyTask + + " on client " + + entry + + " but expected stateful tasks:" + + statefulTasks + + failureContext ); } } @@ -431,16 +428,15 @@ private static void validateAndAddActiveAssignments(final Set statefulTa assignments.get(activeTask).add(entry.getKey()); } else { throw new AssertionError( - new StringBuilder().append("Found an extra active task ") - .append(activeTask) - .append(" on client ") - .append(entry) - .append(" but expected stateful tasks:") - .append(statefulTasks) - .append(" and stateless tasks:") - .append(statelessTasks) - .append(failureContext) - .toString() + "Found an extra active task " + + activeTask + + " on client " + + entry + + " but expected stateful tasks:" + + statefulTasks + + " and stateless tasks:" + + statelessTasks + + failureContext ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/TaskAssignorConvergenceTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/TaskAssignorConvergenceTest.java index 930c8e8be394d..5bac624000a9a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/TaskAssignorConvergenceTest.java +++ 
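The AssignmentTestUtils hunks above, like the earlier SinkNode, StreamsMetadataState, and HostInfo changes, drop explicit StringBuilder chains in favor of plain concatenation. For a single expression this is purely cosmetic: javac compiles one + chain into a single StringBuilder (or an invokedynamic string-concat call on newer JDKs), so no extra intermediate strings are created. The HostInfo hunk also drops a redundant \' escape; a single quote needs no backslash inside a double-quoted Java string. A sketch of the equivalence, with hypothetical fields:

public class ConcatDemo {
    private final String host = "localhost";
    private final int port = 9092;

    // Explicit builder, as the old code was written.
    public String toStringBuilder() {
        StringBuilder sb = new StringBuilder();
        sb.append("HostInfo{host='").append(host).append('\'');
        sb.append(", port=").append(port).append('}');
        return sb.toString();
    }

    // Single concatenation expression, as the cleanup prefers;
    // the compiler emits equivalent code for one expression.
    public String toStringConcat() {
        return "HostInfo{host='" + host + '\'' + ", port=" + port + '}';
    }

    public static void main(String[] args) {
        ConcatDemo d = new ConcatDemo();
        System.out.println(d.toStringBuilder().equals(d.toStringConcat())); // true
    }
}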
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/TaskAssignorConvergenceTest.java @@ -569,11 +569,10 @@ private static void verifyBalancedAssignment(final Harness harness, final int sk final AssignmentTestUtils.TaskSkewReport taskSkewReport = AssignmentTestUtils.analyzeTaskAssignmentBalance(harness.clientStates, skewThreshold); if (taskSkewReport.totalSkewedTasks() > 0) { fail( - new StringBuilder().append("Expected a balanced task assignment, but was: ") - .append(taskSkewReport) - .append('\n') - .append(failureContext) - .toString() + "Expected a balanced task assignment, but was: " + + taskSkewReport + + '\n' + + failureContext ); } } @@ -623,11 +622,10 @@ private static void testForConvergence(final Harness harness, } if (rebalancePending) { - final StringBuilder message = - new StringBuilder().append("Rebalances have not converged after iteration cutoff: ") - .append(iterationLimit) - .append(harness.history); - fail(message.toString()); + final String message = "Rebalances have not converged after iteration cutoff: " + + iterationLimit + + harness.history; + fail(message); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java index c7af25726a4f5..961a4fd40e9a0 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java @@ -413,8 +413,8 @@ public void shouldPutAndFetchEdgeKeyRange() { try (final KeyValueIterator values = bytesStore.fetch( null, Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime - 1L)) { - final List, Long>> expected = asList( - KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) + final List, Long>> expected = Collections.singletonList( + KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) ); assertEquals(expected, toList(values)); @@ -423,8 +423,8 @@ public void shouldPutAndFetchEdgeKeyRange() { try (final KeyValueIterator values = bytesStore.fetch( Bytes.wrap(keyB.getBytes()), null, startEdgeTime + 1, endEdgeTime)) { - final List, Long>> expected = asList( - KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) + final List, Long>> expected = Collections.singletonList( + KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) ); assertEquals(expected, toList(values)); @@ -593,8 +593,8 @@ public void shouldPutAndBackwardFetchEdgeKeyRange() { try (final KeyValueIterator values = bytesStore.backwardFetch( null, Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime - 1L)) { - final List, Long>> expected = asList( - KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) + final List, Long>> expected = Collections.singletonList( + KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) ); assertEquals(expected, toList(values)); @@ -603,8 +603,8 @@ public void shouldPutAndBackwardFetchEdgeKeyRange() { try (final KeyValueIterator values = bytesStore.backwardFetch( Bytes.wrap(keyB.getBytes()), null, startEdgeTime + 1, endEdgeTime)) { - final List, Long>> expected = asList( - KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) + final List, Long>> expected = Collections.singletonList( + KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) ); assertEquals(expected, toList(values)); @@ -671,8 +671,8 @@ public void 
shouldPutAndFetchWithPrefixKey() { try (final KeyValueIterator values = bytesStore.fetch( Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) { - final List, Long>> expected = asList( - KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) + final List, Long>> expected = Collections.singletonList( + KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) ); assertEquals(expected, toList(values)); @@ -754,8 +754,8 @@ public void shouldPutAndBackwardFetchWithPrefix() { try (final KeyValueIterator values = bytesStore.backwardFetch( Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) { - final List, Long>> expected = asList( - KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) + final List, Long>> expected = Collections.singletonList( + KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) ); assertEquals(expected, toList(values)); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java index 2d75f7513444a..604e05c0734cd 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java @@ -536,7 +536,7 @@ public void shouldFetchExactKeys() { try (final KeyValueIterator, Long> iterator = sessionStore.findSessions("a", "aa", 10, 0) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(2L)))); + assertThat(valuesToSet(iterator), equalTo(new HashSet<>(Collections.singletonList(2L)))); } try (final KeyValueIterator, Long> iterator = @@ -592,7 +592,7 @@ public void shouldBackwardFetchExactKeys() { try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions("a", "aa", 10, 0) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(2L)))); + assertThat(valuesToSet(iterator), equalTo(new HashSet<>(Collections.singletonList(2L)))); } try (final KeyValueIterator, Long> iterator = diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowSegmentedBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowSegmentedBytesStoreTest.java index db02f5b6ff0d0..296765bd06f9e 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowSegmentedBytesStoreTest.java @@ -40,8 +40,8 @@ private enum SchemaType { SessionSchemaWithoutIndex } - private boolean hasIndex; - private SchemaType schemaType; + private final boolean hasIndex; + private final SchemaType schemaType; @Parameterized.Parameters(name = "{0}") public static Collection getKeySchema() { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/SessionStoreFetchTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/SessionStoreFetchTest.java index e4c63f06acd84..7914a6ad278b9 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/SessionStoreFetchTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/SessionStoreFetchTest.java @@ -73,13 +73,13 @@ private enum StoreType { InMemory, RocksDB } private static final long WINDOW_SIZE = 500L; private static final long RETENTION_MS = 10000L; - private StoreType storeType; - private boolean enableLogging; - private boolean enableCaching; - private 
boolean forward; + private final StoreType storeType; + private final boolean enableLogging; + private final boolean enableCaching; + private final boolean forward; - private LinkedList, Long>> expectedRecords; - private LinkedList> records; + private final LinkedList, Long>> expectedRecords; + private final LinkedList> records; private Properties streamsConfig; private String low; private String high; diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowStoreFetchTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowStoreFetchTest.java index e4e4a82ba7ceb..7a2c7cbb506a3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowStoreFetchTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowStoreFetchTest.java @@ -74,13 +74,13 @@ private enum StoreType { InMemory, RocksDB, Timed } private static final long WINDOW_SIZE = 500L; private static final long RETENTION_MS = 10000L; - private StoreType storeType; - private boolean enableLogging; - private boolean enableCaching; - private boolean forward; + private final StoreType storeType; + private final boolean enableLogging; + private final boolean enableCaching; + private final boolean forward; - private LinkedList, Long>> expectedRecords; - private LinkedList> records; + private final LinkedList, Long>> expectedRecords; + private final LinkedList> records; private Properties streamsConfig; private String low; private String high; diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java index ecff033e0374a..fac0351492bb3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java @@ -51,13 +51,13 @@ public class RocksDBBlockCacheMetricsTest { private static final String STORE_NAME = "test"; private static final String METRICS_SCOPE = "test-scope"; - private static TaskId taskId = new TaskId(0, 0); + private static final TaskId TASK_ID = new TaskId(0, 0); public static Stream stores() { final File stateDir = TestUtils.tempDirectory("state"); return Stream.of( - Arguments.of(new RocksDBStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), taskId, stateDir)), - Arguments.of(new RocksDBTimestampedStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), taskId, stateDir)) + Arguments.of(new RocksDBStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), TASK_ID, stateDir)), + Arguments.of(new RocksDBTimestampedStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), TASK_ID, stateDir)) ); } @@ -108,7 +108,7 @@ public void assertMetric(final StateStoreContext context, final String group metricName, group, "Ignored", - storeLevelTagMap(taskId.toString(), METRICS_SCOPE, STORE_NAME) + storeLevelTagMap(TASK_ID.toString(), METRICS_SCOPE, STORE_NAME) ); final KafkaMetric metric = (KafkaMetric) metrics.metrics().get(name); assertEquals(expected, metric.metricValue(), String.format("Value for metric '%s-%s' was incorrect", group, metricName)); diff --git a/streams/src/test/java/org/apache/kafka/streams/tests/EosTestDriver.java b/streams/src/test/java/org/apache/kafka/streams/tests/EosTestDriver.java index 
77d0fc3ad6c12..1d89066671bfd 100644 --- a/streams/src/test/java/org/apache/kafka/streams/tests/EosTestDriver.java +++ b/streams/src/test/java/org/apache/kafka/streams/tests/EosTestDriver.java @@ -60,7 +60,7 @@ public class EosTestDriver extends SmokeTestUtil { private static final long MAX_IDLE_TIME_MS = 600000L; private volatile static boolean isRunning = true; - private static CountDownLatch terminated = new CountDownLatch(1); + private static final CountDownLatch TERMINATED = new CountDownLatch(1); private static int numRecordsProduced = 0; @@ -74,7 +74,7 @@ static void generate(final String kafka) { isRunning = false; try { - if (terminated.await(5L, TimeUnit.MINUTES)) { + if (TERMINATED.await(5L, TimeUnit.MINUTES)) { System.out.println("Terminated"); } else { System.out.println("Terminated with timeout"); @@ -167,7 +167,7 @@ static void generate(final String kafka) { } System.out.flush(); } finally { - terminated.countDown(); + TERMINATED.countDown(); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/tests/StaticMemberTestClient.java b/streams/src/test/java/org/apache/kafka/streams/tests/StaticMemberTestClient.java index e4b96fe1053b2..f64619199faad 100644 --- a/streams/src/test/java/org/apache/kafka/streams/tests/StaticMemberTestClient.java +++ b/streams/src/test/java/org/apache/kafka/streams/tests/StaticMemberTestClient.java @@ -30,12 +30,12 @@ public class StaticMemberTestClient { - private static String testName = "StaticMemberTestClient"; + private static final String TEST_NAME = "StaticMemberTestClient"; @SuppressWarnings("unchecked") public static void main(final String[] args) throws Exception { if (args.length < 1) { - System.err.println(testName + " requires one argument (properties-file) but none provided: "); + System.err.println(TEST_NAME + " requires one argument (properties-file) but none provided: "); } System.out.println("StreamsTest instance started"); @@ -46,7 +46,7 @@ public static void main(final String[] args) throws Exception { final String groupInstanceId = Objects.requireNonNull(streamsProperties.getProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG)); - System.out.println(testName + " instance started with group.instance.id " + groupInstanceId); + System.out.println(TEST_NAME + " instance started with group.instance.id " + groupInstanceId); System.out.println("props=" + streamsProperties); System.out.flush(); @@ -54,10 +54,10 @@ public static void main(final String[] args) throws Exception { final String inputTopic = (String) (Objects.requireNonNull(streamsProperties.remove("input.topic"))); final KStream dataStream = builder.stream(inputTopic); - dataStream.peek((k, v) -> System.out.println(String.format("PROCESSED key=%s value=%s", k, v))); + dataStream.peek((k, v) -> System.out.printf("PROCESSED key=%s value=%s%n", k, v)); final Properties config = new Properties(); - config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, testName); + config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, TEST_NAME); config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); diff --git a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsBrokerDownResilienceTest.java b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsBrokerDownResilienceTest.java index 90c2bb94ece6c..c966b2de1feb3 100644 --- 
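StaticMemberTestClient above, and the test drivers that follow, replace System.out.println(String.format(...)) with System.out.printf(..., args). The two print the same text when the format string ends with %n: printf writes directly without building an intermediate String, and %n emits the platform line separator that println would otherwise append. Sketch:

public class PrintfDemo {
    public static void main(String[] args) {
        String k = "key-1", v = "value-1";

        // Before: format into a String, then println appends the separator.
        System.out.println(String.format("PROCESSED key=%s value=%s", k, v));

        // After: printf writes directly; %n supplies the separator.
        System.out.printf("PROCESSED key=%s value=%s%n", k, v);
    }
}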
a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsBrokerDownResilienceTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsBrokerDownResilienceTest.java @@ -80,11 +80,11 @@ public static void main(final String[] args) throws IOException { } if (!confirmCorrectConfigs(streamsProperties)) { - System.err.println(String.format("ERROR: Did not have all required configs expected to contain %s %s %s %s", + System.err.printf("ERROR: Did not have all required configs expected to contain %s %s %s %s%n", StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), - StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG))); + StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG)); Exit.exit(1); } diff --git a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsNamedRepartitionTest.java b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsNamedRepartitionTest.java index af3614c732695..f7be9430d669a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsNamedRepartitionTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsNamedRepartitionTest.java @@ -66,7 +66,7 @@ public static void main(final String[] args) throws Exception { final StreamsBuilder builder = new StreamsBuilder(); final KStream sourceStream = builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String())); - sourceStream.peek((k, v) -> System.out.println(String.format("input data key=%s, value=%s", k, v))); + sourceStream.peek((k, v) -> System.out.printf("input data key=%s, value=%s%n", k, v)); final KStream mappedStream = sourceStream.selectKey((k, v) -> keyFunction.apply(v)); @@ -81,7 +81,7 @@ public static void main(final String[] args) throws Exception { maybeUpdatedStream.groupByKey(Grouped.with("grouped-stream", Serdes.String(), Serdes.String())) .aggregate(initializer, aggregator, Materialized.>as("count-store").withKeySerde(Serdes.String()).withValueSerde(Serdes.Integer())) .toStream() - .peek((k, v) -> System.out.println(String.format("AGGREGATED key=%s value=%s", k, v))) + .peek((k, v) -> System.out.printf("AGGREGATED key=%s value=%s%n", k, v)) .to(aggregationTopic, Produced.with(Serdes.String(), Serdes.Integer())); final Properties config = new Properties(); diff --git a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsOptimizedTest.java b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsOptimizedTest.java index 95945b1b4462c..187a072071be4 100644 --- a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsOptimizedTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsOptimizedTest.java @@ -90,20 +90,20 @@ public static void main(final String[] args) throws Exception { aggregator, Materialized.with(Serdes.String(), Serdes.Integer())) .toStream() - .peek((k, v) -> System.out.println(String.format("AGGREGATED key=%s value=%s", k, v))) + .peek((k, v) -> System.out.printf("AGGREGATED key=%s value=%s%n", k, v)) .to(aggregationTopic, Produced.with(Serdes.String(), Serdes.Integer())); mappedStream.groupByKey() .reduce(reducer, Materialized.with(Serdes.String(), Serdes.String())) .toStream() - .peek((k, v) -> System.out.println(String.format("REDUCED key=%s value=%s", k, v))) + .peek((k, v) -> System.out.printf("REDUCED key=%s value=%s%n", k, v)) .to(reduceTopic, Produced.with(Serdes.String(), Serdes.String())); 
mappedStream.join(countStream, (v1, v2) -> v1 + ":" + v2.toString(), JoinWindows.of(ofMillis(500)), StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.Long())) - .peek((k, v) -> System.out.println(String.format("JOINED key=%s value=%s", k, v))) + .peek((k, v) -> System.out.printf("JOINED key=%s value=%s%n", k, v)) .to(joinTopic, Produced.with(Serdes.String(), Serdes.String())); final Properties config = new Properties(); @@ -125,7 +125,7 @@ public static void main(final String[] args) throws Exception { streams.setStateListener((newState, oldState) -> { if (oldState == State.REBALANCING && newState == State.RUNNING) { final int repartitionTopicCount = getCountOfRepartitionTopicsFound(topology.describe().toString(), repartitionTopicPattern); - System.out.println(String.format("REBALANCING -> RUNNING with REPARTITION TOPIC COUNT=%d", repartitionTopicCount)); + System.out.printf("REBALANCING -> RUNNING with REPARTITION TOPIC COUNT=%d%n", repartitionTopicCount); System.out.flush(); } }); @@ -149,7 +149,7 @@ private static int getCountOfRepartitionTopicsFound(final String topologyString, final List repartitionTopicsFound = new ArrayList<>(); while (matcher.find()) { final String repartitionTopic = matcher.group(); - System.out.println(String.format("REPARTITION TOPIC found -> %s", repartitionTopic)); + System.out.printf("REPARTITION TOPIC found -> %s%n", repartitionTopic); repartitionTopicsFound.add(repartitionTopic); } return repartitionTopicsFound.size(); diff --git a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsStandByReplicaTest.java b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsStandByReplicaTest.java index 2568b498c9705..838a9aa2b8f99 100644 --- a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsStandByReplicaTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsStandByReplicaTest.java @@ -86,11 +86,11 @@ public static void main(final String[] args) throws IOException { final String sinkTopic2 = updated.remove("sinkTopic2"); if (sourceTopic == null || sinkTopic1 == null || sinkTopic2 == null) { - System.err.println(String.format( - "one or more required topics null sourceTopic[%s], sinkTopic1[%s], sinkTopic2[%s]", + System.err.printf( + "one or more required topics null sourceTopic[%s], sinkTopic1[%s], sinkTopic2[%s]%n", sourceTopic, sinkTopic1, - sinkTopic2)); + sinkTopic2); System.err.flush(); Exit.exit(1); } @@ -98,11 +98,11 @@ public static void main(final String[] args) throws IOException { streamsProperties.putAll(updated); if (!confirmCorrectConfigs(streamsProperties)) { - System.err.println(String.format("ERROR: Did not have all required configs expected to contain %s, %s, %s, %s", + System.err.printf("ERROR: Did not have all required configs expected to contain %s, %s, %s, %s%n", StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG), StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), - StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG))); + StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG)); Exit.exit(1); } diff --git a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 6d7da29e3ac12..0a7bbe14f5c63 100644 --- a/streams/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ 
b/streams/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -70,7 +70,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -81,7 +81,7 @@ public void apply(final String key, final String value) { streams.setStateListener((newState, oldState) -> { if (newState == State.RUNNING && oldState == State.REBALANCING) { - System.out.println(String.format("%sSTREAMS in a RUNNING State", upgradePhase)); + System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase); final Set allThreadMetadata = streams.metadataForLocalThreads(); final StringBuilder taskReportBuilder = new StringBuilder(); final List activeTasks = new ArrayList<>(); @@ -101,7 +101,7 @@ public void apply(final String key, final String value) { } if (newState == State.REBALANCING) { - System.out.println(String.format("%sStarting a REBALANCE", upgradePhase)); + System.out.printf("%sStarting a REBALANCE%n", upgradePhase); } }); diff --git a/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java b/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java index 2affd32df69d2..536cf9585ab5c 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java +++ b/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java @@ -43,7 +43,7 @@ public class MockClientSupplier implements KafkaClientSupplier { private String applicationId; public MockAdminClient adminClient = new MockAdminClient(); - private List> preparedProducers = new LinkedList<>(); + private final List> preparedProducers = new LinkedList<>(); public final List> producers = new LinkedList<>(); public final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); public final MockConsumer restoreConsumer = new MockConsumer<>(OffsetResetStrategy.LATEST); diff --git a/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java b/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java index c06bc79cdc137..d60ea30b2b46b 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java +++ b/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java @@ -48,7 +48,7 @@ public class MockInternalNewProcessorContext extends MockProcessorCo private ProcessorNode currentNode; private long currentSystemTimeMs; - private TaskType taskType = TaskType.ACTIVE; + private final TaskType taskType = TaskType.ACTIVE; private long timestamp = 0; private Headers headers = new RecordHeaders(); diff --git a/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java b/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java index e585c04517a06..aa7907a18f919 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java +++ b/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java @@ -51,7 +51,7 @@ public class MockInternalProcessorContext extends MockProcessorContext implement private ProcessorNode currentNode; private RecordCollector recordCollector; private long currentSystemTimeMs; - private TaskType taskType = TaskType.ACTIVE; + private final TaskType taskType = 
TaskType.ACTIVE; private ProcessorMetadata processorMetadata; public MockInternalProcessorContext() { diff --git a/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java b/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java index 77678bc383e8d..0bc1457a02990 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java +++ b/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java @@ -39,7 +39,7 @@ public class MockRestoreConsumer extends MockConsumer { private long endOffset = 0L; private long currentOffset = 0L; - private ArrayList> recordBuffer = new ArrayList<>(); + private final ArrayList> recordBuffer = new ArrayList<>(); @SuppressWarnings("this-escape") public MockRestoreConsumer(final Serializer keySerializer, final Serializer valueSerializer) { diff --git a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index ee15b1dfbc448..1528b2c472bbb 100644 --- a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -67,7 +67,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -82,7 +82,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 6b339b64f404a..4efe70911abe5 100644 --- a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -66,7 +66,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -81,7 +81,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git 
a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 32ef2ebe50b88..1cc115f3c061d 100644 --- a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -62,7 +62,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -77,7 +77,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-0110/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0110/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index a2ffa9d14a54f..25685bb42ea20 100644 --- a/streams/upgrade-system-tests-0110/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-0110/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -61,7 +61,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -76,7 +76,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-10/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-10/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index bda6ac458336f..c66aae3b61414 100644 --- a/streams/upgrade-system-tests-10/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-10/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -70,7 +70,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", 
upgradePhase, recordCounter); System.out.flush(); } } @@ -81,7 +81,7 @@ public void apply(final String key, final String value) { streams.setStateListener((newState, oldState) -> { if (newState == State.RUNNING && oldState == State.REBALANCING) { - System.out.println(String.format("%sSTREAMS in a RUNNING State", upgradePhase)); + System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase); final Set allThreadMetadata = streams.localThreadsMetadata(); final StringBuilder taskReportBuilder = new StringBuilder(); final List activeTasks = new ArrayList<>(); @@ -101,7 +101,7 @@ public void apply(final String key, final String value) { } if (newState == State.REBALANCING) { - System.out.println(String.format("%sStarting a REBALANCE", upgradePhase)); + System.out.printf("%sStarting a REBALANCE%n", upgradePhase); } }); @@ -110,7 +110,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-11/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-11/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 6643d29fad81c..55e07f1e39473 100644 --- a/streams/upgrade-system-tests-11/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-11/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -70,7 +70,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -81,7 +81,7 @@ public void apply(final String key, final String value) { streams.setStateListener((newState, oldState) -> { if (newState == State.RUNNING && oldState == State.REBALANCING) { - System.out.println(String.format("%sSTREAMS in a RUNNING State", upgradePhase)); + System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase); final Set allThreadMetadata = streams.localThreadsMetadata(); final StringBuilder taskReportBuilder = new StringBuilder(); final List activeTasks = new ArrayList<>(); @@ -101,7 +101,7 @@ public void apply(final String key, final String value) { } if (newState == State.REBALANCING) { - System.out.println(String.format("%sStarting a REBALANCE", upgradePhase)); + System.out.printf("%sStarting a REBALANCE%n", upgradePhase); } }); @@ -110,7 +110,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-20/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-20/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 
0c697f6b4cf1c..0b7fcc4820733 100644 --- a/streams/upgrade-system-tests-20/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-20/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -70,7 +70,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -81,7 +81,7 @@ public void apply(final String key, final String value) { streams.setStateListener((newState, oldState) -> { if (newState == State.RUNNING && oldState == State.REBALANCING) { - System.out.println(String.format("%sSTREAMS in a RUNNING State", upgradePhase)); + System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase); final Set allThreadMetadata = streams.localThreadsMetadata(); final StringBuilder taskReportBuilder = new StringBuilder(); final List activeTasks = new ArrayList<>(); @@ -101,7 +101,7 @@ public void apply(final String key, final String value) { } if (newState == State.REBALANCING) { - System.out.println(String.format("%sStarting a REBALANCE", upgradePhase)); + System.out.printf("%sStarting a REBALANCE%n", upgradePhase); } }); @@ -110,7 +110,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-21/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-21/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 299fffacaaf7c..b430607a096d4 100644 --- a/streams/upgrade-system-tests-21/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-21/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -70,7 +70,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -81,7 +81,7 @@ public void apply(final String key, final String value) { streams.setStateListener((newState, oldState) -> { if (newState == State.RUNNING && oldState == State.REBALANCING) { - System.out.println(String.format("%sSTREAMS in a RUNNING State", upgradePhase)); + System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase); final Set allThreadMetadata = streams.localThreadsMetadata(); final StringBuilder taskReportBuilder = new StringBuilder(); final List activeTasks = new ArrayList<>(); @@ -101,7 +101,7 @@ public void apply(final String key, final String value) { } if (newState == State.REBALANCING) { - System.out.println(String.format("%sStarting a REBALANCE", upgradePhase)); + System.out.printf("%sStarting a REBALANCE%n", 
upgradePhase); } }); @@ -110,7 +110,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-22/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-22/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 299fffacaaf7c..b430607a096d4 100644 --- a/streams/upgrade-system-tests-22/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-22/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -70,7 +70,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter)); + System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } @@ -81,7 +81,7 @@ public void apply(final String key, final String value) { streams.setStateListener((newState, oldState) -> { if (newState == State.RUNNING && oldState == State.REBALANCING) { - System.out.println(String.format("%sSTREAMS in a RUNNING State", upgradePhase)); + System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase); final Set<ThreadMetadata> allThreadMetadata = streams.localThreadsMetadata(); final StringBuilder taskReportBuilder = new StringBuilder(); final List<String> activeTasks = new ArrayList<>(); @@ -101,7 +101,7 @@ public void apply(final String key, final String value) { } if (newState == State.REBALANCING) { - System.out.println(String.format("%sStarting a REBALANCE", upgradePhase)); + System.out.printf("%sStarting a REBALANCE%n", upgradePhase); } }); @@ -110,7 +110,7 @@ public void apply(final String key, final String value) { Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); - System.out.println(String.format("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED", upgradePhase)); + System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } diff --git a/streams/upgrade-system-tests-23/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-23/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java index 0a7a48fac1c23..8dfdd954d48a3 100644 --- a/streams/upgrade-system-tests-23/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ b/streams/upgrade-system-tests-23/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java @@ -68,7 +68,7 @@ public static void main(final String[] args) throws Exception { @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { - System.out.println(String.format("Processed %d records so far", recordCounter)); + System.out.printf("Processed %d records so far%n", recordCounter); System.out.flush(); } }
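The change repeated through the streams system-test diffs above swaps System.out.println(String.format(...)) for System.out.printf(...). The two are only equivalent because %n is appended to each format string: println adds the platform line separator itself, while printf emits exactly what the format specifies. A minimal, self-contained sketch of the before/after (the class name and literal values here are illustrative, not part of the patch):

    public class PrintfCleanupSketch {
        public static void main(final String[] args) {
            final String upgradePhase = "";
            final int recordCounter = 100;

            // Before: build the full string, then println appends the line separator.
            System.out.println(String.format("%sProcessed %d records so far", upgradePhase, recordCounter));

            // After: printf formats in place; the trailing %n emits the platform
            // line separator, so both lines print byte-identical output.
            System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter);
        }
    }

Using %n rather than a literal \n is what keeps the printf form a drop-in replacement on every platform.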
diff --git a/tools/src/main/java/org/apache/kafka/tools/ConsumerPerformance.java b/tools/src/main/java/org/apache/kafka/tools/ConsumerPerformance.java index a907233057ea8..1373d274e6846 100644 --- a/tools/src/main/java/org/apache/kafka/tools/ConsumerPerformance.java +++ b/tools/src/main/java/org/apache/kafka/tools/ConsumerPerformance.java @@ -219,8 +219,8 @@ private static void printExtendedProgress(long bytesRead, } public static class ConsumerPerfRebListener implements ConsumerRebalanceListener { - private AtomicLong joinTimeMs; - private AtomicLong joinTimeMsInSingleRound; + private final AtomicLong joinTimeMs; + private final AtomicLong joinTimeMsInSingleRound; private long joinStartMs; public ConsumerPerfRebListener(AtomicLong joinTimeMs, long joinStartMs, AtomicLong joinTimeMsInSingleRound) { diff --git a/tools/src/main/java/org/apache/kafka/tools/LeaderElectionCommand.java b/tools/src/main/java/org/apache/kafka/tools/LeaderElectionCommand.java index 4db46867bc123..ace0add5f4e45 100644 --- a/tools/src/main/java/org/apache/kafka/tools/LeaderElectionCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/LeaderElectionCommand.java @@ -160,28 +160,26 @@ private static void electLeaders(Admin client, ElectionType electionType, Option String partitionsAsString = succeeded.stream() .map(TopicPartition::toString) .collect(Collectors.joining(", ")); - System.out.println(String.format("Successfully completed leader election (%s) for partitions %s", - electionType, partitionsAsString)); + System.out.printf("Successfully completed leader election (%s) for partitions %s%n", + electionType, partitionsAsString); } if (!noop.isEmpty()) { String partitionsAsString = noop.stream() .map(TopicPartition::toString) .collect(Collectors.joining(", ")); - System.out.println(String.format("Valid replica already elected for partitions %s", partitionsAsString)); + System.out.printf("Valid replica already elected for partitions %s%n", partitionsAsString); } if (!failed.isEmpty()) { AdminCommandFailedException rootException = new AdminCommandFailedException(String.format("%s replica(s) could not be elected", failed.size())); failed.forEach((key, value) -> { - System.out.println( - String.format( - "Error completing leader election (%s) for partition: %s: %s", - electionType, - key, - value - ) + System.out.printf( + "Error completing leader election (%s) for partition: %s: %s%n", + electionType, + key, + value ); rootException.addSuppressed(value); }); diff --git a/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java b/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java index 394f5078c4690..1a6558def9145 100644 --- a/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java +++ b/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java @@ -48,7 +48,7 @@ public static void printMetrics(Map<MetricName, ? extends Metric> metrics) { } String doubleOutputFormat = "%-" + maxLengthOfDisplayName + "s : %.3f"; String defaultOutputFormat = "%-" + maxLengthOfDisplayName + "s : %s"; - System.out.println(String.format("\n%-" + maxLengthOfDisplayName + "s %s", "Metric Name", "Value")); + System.out.printf("\n%-" + maxLengthOfDisplayName + "s %s%n", "Metric Name", "Value"); for (Map.Entry<String, Object> entry : sortedMetrics.entrySet()) { String outputFormat; @@ -56,7 +56,7 @@ public static void printMetrics(Map<MetricName, ? extends Metric> metrics) { outputFormat = doubleOutputFormat; else outputFormat = defaultOutputFormat; - System.out.println(String.format(outputFormat, entry.getKey(), entry.getValue())); + System.out.printf(outputFormat + "%n", entry.getKey(), entry.getValue()); } } } diff --git
a/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java b/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java index 93b19adcf3ca9..76971b28c6092 100644 --- a/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java @@ -984,7 +984,7 @@ private void checkRequiredArgs() { CommandLineUtils.checkRequiredArgs(parser, options, topicOpt); if (has(alterOpt)) { Set> usedOptions = new HashSet<>(Arrays.asList(bootstrapServerOpt, configOpt)); - Set> invalidOptions = new HashSet<>(Arrays.asList(alterOpt)); + Set> invalidOptions = new HashSet<>(Collections.singletonList(alterOpt)); CommandLineUtils.checkInvalidArgsSet(parser, options, usedOptions, invalidOptions, Optional.of(KAFKA_CONFIGS_CLI_SUPPORTS_ALTERING_TOPIC_CONFIGS_WITH_A_BOOTSTRAP_SERVER)); CommandLineUtils.checkRequiredArgs(parser, options, partitionsOpt); } @@ -994,9 +994,9 @@ private void checkInvalidArgs() { // check invalid args CommandLineUtils.checkInvalidArgs(parser, options, configOpt, invalidOptions(Arrays.asList(alterOpt, createOpt))); CommandLineUtils.checkInvalidArgs(parser, options, deleteConfigOpt, - invalidOptions(new HashSet<>(Arrays.asList(bootstrapServerOpt)), Arrays.asList(alterOpt))); + invalidOptions(new HashSet<>(Collections.singletonList(bootstrapServerOpt)), Collections.singletonList(alterOpt))); CommandLineUtils.checkInvalidArgs(parser, options, partitionsOpt, invalidOptions(Arrays.asList(alterOpt, createOpt))); - CommandLineUtils.checkInvalidArgs(parser, options, replicationFactorOpt, invalidOptions(Arrays.asList(createOpt))); + CommandLineUtils.checkInvalidArgs(parser, options, replicationFactorOpt, invalidOptions(Collections.singletonList(createOpt))); CommandLineUtils.checkInvalidArgs(parser, options, replicaAssignmentOpt, invalidOptions(Arrays.asList(alterOpt, createOpt))); if (options.has(createOpt)) { CommandLineUtils.checkInvalidArgs(parser, options, replicaAssignmentOpt, partitionsOpt, replicationFactorOpt); @@ -1012,10 +1012,10 @@ private void checkInvalidArgs() { CommandLineUtils.checkInvalidArgs(parser, options, reportUnavailablePartitionsOpt, invalidOptions(Collections.singleton(topicsWithOverridesOpt), Arrays.asList(describeOpt, reportUnavailablePartitionsOpt))); CommandLineUtils.checkInvalidArgs(parser, options, topicsWithOverridesOpt, - invalidOptions(new HashSet<>(allReplicationReportOpts), Arrays.asList(describeOpt))); + invalidOptions(new HashSet<>(allReplicationReportOpts), Collections.singletonList(describeOpt))); CommandLineUtils.checkInvalidArgs(parser, options, ifExistsOpt, invalidOptions(Arrays.asList(alterOpt, deleteOpt, describeOpt))); - CommandLineUtils.checkInvalidArgs(parser, options, ifNotExistsOpt, invalidOptions(Arrays.asList(createOpt))); + CommandLineUtils.checkInvalidArgs(parser, options, ifNotExistsOpt, invalidOptions(Collections.singletonList(createOpt))); CommandLineUtils.checkInvalidArgs(parser, options, excludeInternalTopicOpt, invalidOptions(Arrays.asList(listOpt, describeOpt))); } diff --git a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java index 27151cc3af7bf..4de207289eda9 100644 --- a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommand.java @@ -430,7 +430,7 @@ private void printStates(Map states) { String format = "\n%" + -coordinatorColLen + "s %-25s 
%-20s %-15s %s"; System.out.printf(format, "GROUP", "COORDINATOR (ID)", "ASSIGNMENT-STRATEGY", "STATE", "#MEMBERS"); - System.out.printf(format, state.group, coordinator, state.assignmentStrategy, state.state.toString(), state.numMembers); + System.out.printf(format, state.group, coordinator, state.assignmentStrategy, state.state, state.numMembers); System.out.println(); } }); diff --git a/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellTest.java b/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellTest.java index 42d65816d63c8..e0e9239a2aab4 100644 --- a/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellTest.java @@ -40,6 +40,7 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.Properties; @@ -325,7 +326,7 @@ public void testTopicPartitionsArgWithInternalIncluded() { List offsets = executeAndParse("--topic-partitions", "__.*:0"); - assertEquals(Arrays.asList(new Row("__consumer_offsets", 0, 0L)), offsets); + assertEquals(Collections.singletonList(new Row("__consumer_offsets", 0, 0L)), offsets); } @ClusterTest diff --git a/tools/src/test/java/org/apache/kafka/tools/TopicCommandIntegrationTest.java b/tools/src/test/java/org/apache/kafka/tools/TopicCommandIntegrationTest.java index 1dd3082feba20..81147f48cba6c 100644 --- a/tools/src/test/java/org/apache/kafka/tools/TopicCommandIntegrationTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/TopicCommandIntegrationTest.java @@ -930,7 +930,7 @@ public void testDescribeUnderMinIsrPartitionsMixed(String quorum) { fullyReplicatedReplicaAssignmentMap.put(0, JavaConverters.asScalaBufferConverter(Arrays.asList((Object) 1, (Object) 2, (Object) 3)).asScala().toSeq()); scala.collection.mutable.HashMap> offlineReplicaAssignmentMap = new scala.collection.mutable.HashMap<>(); - offlineReplicaAssignmentMap.put(0, JavaConverters.asScalaBufferConverter(Arrays.asList((Object) 0)).asScala().toSeq()); + offlineReplicaAssignmentMap.put(0, JavaConverters.asScalaBufferConverter(Collections.singletonList((Object) 0)).asScala().toSeq()); Properties topicConfig = new Properties(); topicConfig.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "6"); diff --git a/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java index d4fff9d0c52f0..cf92593004f6c 100644 --- a/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java @@ -79,7 +79,7 @@ public void testIsNotUnderReplicatedWhenAdding() { new TopicPartitionInfo(0, new Node(1, "localhost", 9091), replicas, Collections.singletonList(new Node(1, "localhost", 9091))), null, false, - new PartitionReassignment(replicaIds, Arrays.asList(2), Collections.emptyList()) + new PartitionReassignment(replicaIds, Collections.singletonList(2), Collections.emptyList()) ); assertFalse(partitionDescription.isUnderReplicated()); @@ -222,7 +222,7 @@ public void testCreateTopicDoesNotRetryThrottlingQuotaExceededException() { .configs(Collections.emptyMap()); verify(adminClient, times(1)).createTopics( - eq(new HashSet<>(Arrays.asList(expectedNewTopic))), + eq(new HashSet<>(Collections.singletonList(expectedNewTopic))), argThat(exception -> !exception.shouldRetryOnQuotaViolation()) ); } @@ -247,7 +247,7 @@ public void 
testDeleteTopicDoesNotRetryThrottlingQuotaExceededException() { assertInstanceOf(ThrottlingQuotaExceededException.class, exception.getCause()); verify(adminClient).deleteTopics( - argThat((Collection topics) -> topics.equals(Arrays.asList(topicName))), + argThat((Collection topics) -> topics.equals(Collections.singletonList(topicName))), argThat((DeleteTopicsOptions options) -> !options.shouldRetryOnQuotaViolation())); } diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTest.java index ae46b7ac2ff40..5757f027e8103 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupCommandTest.java @@ -38,7 +38,6 @@ import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -168,7 +167,7 @@ ConsumerGroupExecutor addConsumerGroupExecutor(int numConsumers, String topic, S } SimpleConsumerGroupExecutor addSimpleGroupExecutor(String group) { - return addSimpleGroupExecutor(Arrays.asList(new TopicPartition(TOPIC, 0)), group); + return addSimpleGroupExecutor(Collections.singletonList(new TopicPartition(TOPIC, 0)), group); } SimpleConsumerGroupExecutor addSimpleGroupExecutor(Collection partitions, String group) { diff --git a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsIntegrationTest.java b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsIntegrationTest.java index adc4563b0f318..3ca6f924a8564 100644 --- a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsIntegrationTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsIntegrationTest.java @@ -765,7 +765,7 @@ class ReassignPartitionsTestCluster implements Closeable { private final Map>> topics = new HashMap<>(); { topics.put("foo", asList(asList(0, 1, 2), asList(1, 2, 3))); - topics.put("bar", asList(asList(3, 2, 1))); + topics.put("bar", singletonList(asList(3, 2, 1))); topics.put("baz", asList(asList(1, 0, 2), asList(2, 0, 1), asList(0, 2, 1))); } diff --git a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java index 609b68c4cc90d..39a7844321dc5 100644 --- a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java @@ -144,10 +144,10 @@ private void addTopics(MockAdminClient adminClient) { asList(b.get(1), b.get(2), b.get(3)), asList(b.get(1), b.get(2), b.get(3))) ), Collections.emptyMap()); - adminClient.addTopic(false, "bar", asList( - new TopicPartitionInfo(0, b.get(2), - asList(b.get(2), b.get(3), b.get(0)), - asList(b.get(2), b.get(3), b.get(0))) + adminClient.addTopic(false, "bar", Collections.singletonList( + new TopicPartitionInfo(0, b.get(2), + asList(b.get(2), b.get(3), b.get(0)), + asList(b.get(2), b.get(3), b.get(0))) ), Collections.emptyMap()); } @@ -219,10 +219,10 @@ public void testFindLogDirMoveStates() throws Exception { addTopics(adminClient); List b = adminClient.brokers(); - adminClient.addTopic(false, "quux", asList( - new TopicPartitionInfo(0, b.get(2), - asList(b.get(1), b.get(2), b.get(3)), - asList(b.get(1), b.get(2), b.get(3)))), + 
adminClient.addTopic(false, "quux", Collections.singletonList( + new TopicPartitionInfo(0, b.get(2), + asList(b.get(1), b.get(2), b.get(3)), + asList(b.get(1), b.get(2), b.get(3)))), Collections.emptyMap()); Map<TopicPartitionReplica, String> replicaAssignment = new HashMap<>(); @@ -289,7 +289,7 @@ public void testGetReplicaAssignments() throws Exception { assignments.put(new TopicPartition("foo", 0), asList(0, 1, 2)); assignments.put(new TopicPartition("foo", 1), asList(1, 2, 3)); - assertEquals(assignments, getReplicaAssignmentForTopics(adminClient, asList("foo"))); + assertEquals(assignments, getReplicaAssignmentForTopics(adminClient, Collections.singletonList("foo"))); assignments.clear(); @@ -344,7 +344,7 @@ public void testParseGenerateAssignmentArgs() throws Exception { assertThrows(AdminCommandFailedException.class, () -> parseGenerateAssignmentArgs( "{\"topics\": [{\"topic\": \"foo\"}], \"version\":1}", "5,2,3,4,5"), "Expected to detect duplicate broker list entries").getMessage()); - assertEquals(new SimpleImmutableEntry<>(asList(5, 2, 3, 4), asList("foo")), + assertEquals(new SimpleImmutableEntry<>(asList(5, 2, 3, 4), Collections.singletonList("foo")), parseGenerateAssignmentArgs("{\"topics\": [{\"topic\": \"foo\"}], \"version\":1}", "5,2,3,4")); assertStartsWith("List of topics to reassign contains duplicate entries", assertThrows(AdminCommandFailedException.class, () -> parseGenerateAssignmentArgs( @@ -473,7 +473,7 @@ public void testMoveMap() { Map<TopicPartition, PartitionReassignment> currentReassignments = new HashMap<>(); currentReassignments.put(new TopicPartition("foo", 0), new PartitionReassignment( - asList(1, 2, 3, 4), asList(4), asList(3))); + asList(1, 2, 3, 4), Collections.singletonList(4), Collections.singletonList(3))); currentReassignments.put(new TopicPartition("foo", 1), new PartitionReassignment( asList(4, 5, 6, 7, 8), asList(7, 8), asList(4, 5))); currentReassignments.put(new TopicPartition("foo", 2), new PartitionReassignment( @@ -490,7 +490,7 @@ proposedParts.put(new TopicPartition("foo", 0), asList(1, 2, 5)); proposedParts.put(new TopicPartition("foo", 2), asList(3, 4)); proposedParts.put(new TopicPartition("foo", 3), asList(5, 6)); - proposedParts.put(new TopicPartition("foo", 4), asList(3)); + proposedParts.put(new TopicPartition("foo", 4), Collections.singletonList(3)); proposedParts.put(new TopicPartition("foo", 5), asList(3, 4, 5, 6)); proposedParts.put(new TopicPartition("bar", 0), asList(1, 2, 3)); @@ -509,16 +509,16 @@ Map<Integer, PartitionMove> fooMoves = new HashMap<>(); - fooMoves.put(0, new PartitionMove(new HashSet<>(asList(1, 2, 3)), new HashSet<>(asList(5)))); + fooMoves.put(0, new PartitionMove(new HashSet<>(asList(1, 2, 3)), new HashSet<>(Collections.singletonList(5)))); fooMoves.put(1, new PartitionMove(new HashSet<>(asList(4, 5, 6)), new HashSet<>(asList(7, 8)))); fooMoves.put(2, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(3, 4)))); fooMoves.put(3, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(5, 6)))); - fooMoves.put(4, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(3)))); + fooMoves.put(4, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(Collections.singletonList(3)))); fooMoves.put(5, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(3, 4, 5, 6)))); Map<Integer, PartitionMove> barMoves = new HashMap<>(); - barMoves.put(0, new PartitionMove(new HashSet<>(asList(2, 3, 4)), new HashSet<>(Collections.singletonList(1)))); assertEquals(fooMoves, moveMap.get("foo")); assertEquals(barMoves, moveMap.get("bar")); @@ -747,7 +747,7 @@ public void testAlterReplicaLogDirs() throws Exception { assignment.put(new TopicPartitionReplica("quux", 1, 0), "/tmp/kafka-logs1"); assertEquals( - new HashSet<>(asList(new TopicPartitionReplica("foo", 0, 0))), + new HashSet<>(Collections.singletonList(new TopicPartitionReplica("foo", 0, 0))), alterReplicaLogDirs(adminClient, assignment) ); } diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/rest/TasksRequest.java b/trogdor/src/main/java/org/apache/kafka/trogdor/rest/TasksRequest.java index 7ed3d7c8d65f6..70ad19b4231fb 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/rest/TasksRequest.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/rest/TasksRequest.java @@ -133,10 +133,6 @@ public boolean matches(String taskId, long startMs, long endMs, TaskStateType st return false; } - if (this.state.isPresent() && !this.state.get().equals(state)) { - return false; - } - - return true; + return !this.state.isPresent() || this.state.get().equals(state); } } diff --git a/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java b/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java index 22da4987c3015..fc9ce2cb81dd0 100644 --- a/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java +++ b/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java @@ -45,8 +45,8 @@ public void testExpansions() { )); assertEquals(expected1, StringExpander.expand("foo[1-3]")); - HashSet<String> expected2 = new HashSet<>(Arrays.asList( - "foo bar baz 0" + HashSet<String> expected2 = new HashSet<>(Collections.singletonList( + "foo bar baz 0" )); assertEquals(expected2, StringExpander.expand("foo bar baz [0-0]"));
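The other pattern that dominates the tools and trogdor hunks above is replacing a one-element Arrays.asList(x) with Collections.singletonList(x). A small self-contained sketch of the difference (the class name and values are illustrative only):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class SingletonListSketch {
        public static void main(final String[] args) {
            // Before: Arrays.asList wraps a freshly allocated varargs array,
            // even when there is only one element.
            final List<String> before = Arrays.asList("foo");

            // After: Collections.singletonList returns an immutable one-field
            // holder and states the single-element intent explicitly.
            final List<String> after = Collections.singletonList("foo");

            // List equality is element-wise across implementations, so the swap
            // is safe inside assertions such as assertEquals(expected, actual).
            System.out.println(before.equals(after)); // prints: true
        }
    }

One behavioral difference to keep in mind: Arrays.asList still allows set(), while singletonList throws UnsupportedOperationException on every mutator.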
From 5574384a260e7034e1fe5dac3c9f018309ec00fd Mon Sep 17 00:00:00 2001 From: Sanskar Jhajharia Date: Wed, 22 May 2024 11:55:13 +0530 Subject: [PATCH 2/2] restrict unnecessary change --- .../java/org/apache/kafka/common/utils/PureJavaCrc32C.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java b/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java index 95a0b30cb90e6..e78b83ee91c83 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java @@ -111,8 +111,8 @@ final public void update(int b) { // java -cp build/test/classes/:build/classes/ \ // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78 - private static final int T8_0_START = 0; - private static final int T8_1_START = 256; + private static final int T8_0_START = 0 * 256; + private static final int T8_1_START = 1 * 256; private static final int T8_2_START = 2 * 256; private static final int T8_3_START = 3 * 256; private static final int T8_4_START = 4 * 256;
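Patch 2 deliberately restores T8_0_START and T8_1_START to the 0 * 256 and 1 * 256 spellings instead of keeping them pre-folded to 0 and 256. A sketch of the reasoning, assuming the eight-blocks-of-256 table layout that the surrounding PureJavaCrc32C comments describe (the class below is illustrative, not from the patch):

    public class TableStartSketch {
        // The lookup table is eight consecutive 256-entry blocks in a single
        // array, so writing every offset as i * 256 keeps the generated-code
        // pattern uniform and easy to audit against its neighbors.
        private static final int T8_0_START = 0 * 256;
        private static final int T8_1_START = 1 * 256;
        private static final int T8_2_START = 2 * 256;

        public static void main(final String[] args) {
            // javac folds these at compile time, so the readable form has zero
            // runtime cost: this prints 0 256 512.
            System.out.printf("%d %d %d%n", T8_0_START, T8_1_START, T8_2_START);
        }
    }

Since the table is machine-generated (see the TestPureJavaCrc32 command quoted in the file's comments), keeping the mechanical i * 256 form also minimizes diff noise if the table is ever regenerated.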