@@ -36,7 +36,6 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
@@ -51,7 +50,6 @@
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
-import static java.util.Collections.singletonList;
 import static org.apache.kafka.common.utils.Utils.require;
 import static org.apache.kafka.storage.internals.log.LogFileUtils.CLEANED_FILE_SUFFIX;
 import static org.apache.kafka.storage.internals.log.LogFileUtils.DELETED_FILE_SUFFIX;
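The hunks in this patch swap the legacy factories (`Collections.singletonList`, `Arrays.asList`, `Collections.emptyList`) for the Java 9+ `List.of` factories, which is why the two imports above can be dropped. The replacements are not drop-in equivalents in general; below is a minimal sketch of the differences (the class name and values are illustrative, not part of this patch):

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ListFactoryDemo {
    public static void main(String[] args) {
        // List.of returns a fully immutable list: set/add/remove all throw.
        List<String> ofList = List.of("a");
        // ofList.set(0, "b");                 // UnsupportedOperationException

        // Arrays.asList is fixed-size but writable: set works, add/remove throw.
        List<String> asList = Arrays.asList("a");
        asList.set(0, "b");                    // allowed

        // singletonList and List.of also differ on nulls: List.of rejects them.
        List<String> single = Collections.singletonList(null); // allowed
        // List.of((String) null);             // NullPointerException

        System.out.println(ofList + " " + asList + " " + single);
    }
}
```

In the call sites touched here the lists appear to be only iterated or passed along, with non-null elements, so the swap is behavior-preserving.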
|
@@ -433,11 +431,11 @@ public LogSegment createAndDeleteSegment(long newOffset,
             config.preallocate);
         segments.add(newSegment);
 
-        reason.logReason(singletonList(segmentToDelete));
+        reason.logReason(List.of(segmentToDelete));
         if (newOffset != segmentToDelete.baseOffset()) {
             segments.remove(segmentToDelete.baseOffset());
         }
-        deleteSegmentFiles(singletonList(segmentToDelete), asyncDelete, dir, topicPartition, config, scheduler, logDirFailureChannel, logIdent);
+        deleteSegmentFiles(List.of(segmentToDelete), asyncDelete, dir, topicPartition, config, scheduler, logDirFailureChannel, logIdent);
         return newSegment;
     }
 
|
@@ -619,7 +617,7 @@ public LogSegment roll(Long expectedNextOffset) {
         File offsetIdxFile = LogFileUtils.offsetIndexFile(dir, newOffset);
         File timeIdxFile = LogFileUtils.timeIndexFile(dir, newOffset);
         File txnIdxFile = LogFileUtils.transactionIndexFile(dir, newOffset);
-        for (File file : Arrays.asList(logFile, offsetIdxFile, timeIdxFile, txnIdxFile)) {
+        for (File file : List.of(logFile, offsetIdxFile, timeIdxFile, txnIdxFile)) {
             if (file.exists()) {
                 logger.warn("Newly rolled segment file {} already exists; deleting it first", file.getAbsolutePath());
                 Files.delete(file.toPath());
|
@@ -791,7 +789,7 @@ public static <T> Optional<T> nextItem(Iterator<T> iterator) {
 
     private static FetchDataInfo emptyFetchDataInfo(LogOffsetMetadata fetchOffsetMetadata, boolean includeAbortedTxns) {
         Optional<List<FetchResponseData.AbortedTransaction>> abortedTransactions = includeAbortedTxns
-            ? Optional.of(Collections.emptyList())
+            ? Optional.of(List.of())
            : Optional.empty();
         return new FetchDataInfo(fetchOffsetMetadata, MemoryRecords.EMPTY, false, abortedTransactions);
     }
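For the empty case the two factories behave identically from the caller's side: both hand back a shared immutable empty list, so `emptyFetchDataInfo` is unaffected. A quick illustrative check (instance identity for `List.of` is an OpenJDK implementation detail, not a spec guarantee):

```java
import java.util.Collections;
import java.util.List;

public class EmptyListDemo {
    public static void main(String[] args) {
        // equals() holds across the two factories; both results are immutable.
        System.out.println(List.of().equals(Collections.emptyList()));          // true
        // On OpenJDK each factory reuses a cached empty instance.
        System.out.println(List.of() == List.of());                             // true (not guaranteed by spec)
        System.out.println(Collections.emptyList() == Collections.emptyList()); // true
    }
}
```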
|
@@ -943,7 +941,7 @@ public static SplitSegmentResult splitOverflowedSegment(LogSegment segment,
             }
             // replace old segment with new ones
             LOG.info("{}Replacing overflowed segment {} with split segments {}", logPrefix, segment, newSegments);
-            List<LogSegment> deletedSegments = replaceSegments(existingSegments, newSegments, singletonList(segment),
+            List<LogSegment> deletedSegments = replaceSegments(existingSegments, newSegments, List.of(segment),
                 dir, topicPartition, config, scheduler, logDirFailureChannel, logPrefix, false);
             return new SplitSegmentResult(deletedSegments, newSegments);
         } catch (Exception e) {
|
@@ -1035,7 +1033,7 @@ public static List<LogSegment> replaceSegments(LogSegments existingSegments,
             existingSegments.remove(segment.baseOffset());
         }
         deleteSegmentFiles(
-            singletonList(segment),
+            List.of(segment),
             true,
             dir,
             topicPartition,