 import org.apache.kafka.common.TopicIdPartition;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.Uuid;
+import org.apache.kafka.common.compress.Compression;
+import org.apache.kafka.common.message.ShareFetchResponseData;
 import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.record.MemoryRecords;
+import org.apache.kafka.common.record.MemoryRecordsBuilder;
+import org.apache.kafka.common.record.TimestampType;
+import org.apache.kafka.common.requests.ShareFetchResponse;
 import org.apache.kafka.server.storage.log.FetchParams;
 import org.apache.kafka.storage.log.metrics.BrokerTopicStats;
 
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
+import java.nio.ByteBuffer;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -66,6 +75,21 @@ public void testErrorInAllPartitions() {
         assertTrue(shareFetch.errorInAllPartitions());
     }
 
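+    // responseData() should recompute its result from the supplied topic-name map on every call
+    // rather than caching it: a second call with an empty map must return no partition data.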
+    @Test
+    public void testDontCacheAnyData() {
+        final TopicIdPartition tidp = new TopicIdPartition(Uuid.randomUuid(), 0, "topic");
+        MemoryRecords records = buildRecords(1L, 3, 1);
+
+        ShareFetchResponse shareFetch = shareFetchResponse(tidp, records, Errors.NONE, "", (short) 0,
+            "", List.of(), 0);
+        LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> responseData = shareFetch.responseData(Map.of(tidp.topicId(), tidp.topic()));
+        assertEquals(1, responseData.size());
+        responseData.forEach((topicIdPartition, partitionData) -> assertEquals(records, partitionData.records()));
+
+        LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> nonResponseData = shareFetch.responseData(Map.of());
+        assertEquals(0, nonResponseData.size());
+    }
+
     @Test
     public void testErrorInAllPartitionsWithMultipleTopicIdPartitions() {
         TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
@@ -201,4 +225,27 @@ public void testMaybeCompleteWithExceptionWithExistingErroneousTopicPartition()
         assertEquals(1, brokerTopicStats.allTopicsStats().failedShareFetchRequestRate().count());
         assertEquals(1, brokerTopicStats.topicStats("foo").failedShareFetchRequestRate().count());
     }
+
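+    // Builds a batch of `count` uncompressed records starting at the given base offset, with
+    // sequentially numbered values, for use as a test payload.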
+    private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) {
+        MemoryRecordsBuilder builder = MemoryRecords.builder(
+            ByteBuffer.allocate(1024), Compression.NONE, TimestampType.CREATE_TIME, baseOffset);
+        for (int i = 0; i < count; i++)
+            builder.append(0L, "key".getBytes(), ("value-" + (firstMessageId + i)).getBytes());
+        return builder.build();
+    }
+
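+    // Wraps a single partition's records, error codes, and acquired records into a
+    // ShareFetchResponse for the test above.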
+    private ShareFetchResponse shareFetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error,
+                                                  String errorMessage, short acknowledgeErrorCode, String acknowledgeErrorMessage,
+                                                  List<ShareFetchResponseData.AcquiredRecords> acquiredRecords, int throttleTime) {
+        Map<TopicIdPartition, ShareFetchResponseData.PartitionData> partitions = Map.of(tp,
+            new ShareFetchResponseData.PartitionData()
+                .setPartitionIndex(tp.topicPartition().partition())
+                .setErrorCode(error.code())
+                .setErrorMessage(errorMessage)
+                .setAcknowledgeErrorCode(acknowledgeErrorCode)
+                .setAcknowledgeErrorMessage(acknowledgeErrorMessage)
+                .setRecords(records)
+                .setAcquiredRecords(acquiredRecords));
+        return ShareFetchResponse.of(Errors.NONE, throttleTime, new LinkedHashMap<>(partitions), List.of(), Integer.MAX_VALUE);
+    }
 }