/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * The OpenSearch Contributors require contributions made to
 * this file be licensed under the Apache-2.0 license or a
 * compatible open source license.
 */

package org.opensearch.remotestore;

import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadata;
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataResponse;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.transport.MockTransportService;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.hasKey;

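/**
 * Integration tests for the remote store metadata API, verifying that segment and translog
 * metadata can be retrieved per index and per shard for a remote-store-backed index.
 */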
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreMetadataIT extends RemoteStoreBaseIntegTestCase {

    private static final String INDEX_NAME = "remote-store-meta-api-test";

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList());
    }

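    // The cluster starts with no data nodes (numDataNodes = 0), so each test calls this
    // explicitly to bring up a three-node cluster before creating the index.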
    public void setup() {
        internalCluster().startNodes(3);
    }

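    // Verifies that the metadata API returns the expected segment and translog metadata
    // regardless of which node serves the request.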
    @SuppressWarnings("unchecked")
    public void testMetadataResponseFromAllNodes() {
        setup();

        createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 3));
        ensureGreen(INDEX_NAME);
        indexDocs();
        client().admin().indices().prepareRefresh(INDEX_NAME).get();

        ClusterState state = getClusterState();
        List<String> nodes = state.nodes().getNodes().values().stream().map(DiscoveryNode::getName).collect(Collectors.toList());

        for (String node : nodes) {
            RemoteStoreMetadataResponse response = client(node).admin().cluster().prepareRemoteStoreMetadata(INDEX_NAME, null).get();
            assertTrue(response.getSuccessfulShards() > 0);
            assertNotNull(response.groupByIndexAndShards());

            response.groupByIndexAndShards().forEach((index, shardMap) -> {
                shardMap.forEach((shardId, metadataList) -> {
                    assertFalse(metadataList.isEmpty());

                    for (RemoteStoreMetadata metadata : metadataList) {
                        assertEquals(index, metadata.getIndexName());
                        assertEquals((int) shardId, metadata.getShardId());

                        Map<String, Object> segments = metadata.getSegments();
                        assertNotNull(segments);
                        assertTrue(segments.containsKey("files"));
                        Map<String, Object> files = (Map<String, Object>) segments.get("files");
                        assertFalse(files.isEmpty());

                        for (Map.Entry<String, Object> entry : files.entrySet()) {
                            Map<String, Object> fileMeta = (Map<String, Object>) entry.getValue();
                            assertThat(fileMeta, allOf(hasKey("original_name"), hasKey("checksum"), hasKey("length")));
                        }

                        assertTrue(segments.containsKey("replication_checkpoint"));
                        Map<String, Object> checkpoint = (Map<String, Object>) segments.get("replication_checkpoint");
                        assertThat(
                            checkpoint,
                            allOf(
                                hasKey("primary_term"),
                                hasKey("segments_gen"),
                                hasKey("segment_infos_version"),
                                hasKey("codec"),
                                hasKey("created_timestamp")
                            )
                        );

                        Map<String, Object> translog = metadata.getTranslog();
                        assertNotNull(translog);
                        assertThat(
                            translog,
                            allOf(
                                hasKey("primary_term"),
                                hasKey("generation"),
                                hasKey("min_translog_gen"),
                                hasKey("generation_to_primary_term")
                            )
                        );
                    }
                });
            });
        }
    }

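    // Verifies that a single metadata request covers every shard of the index and that
    // each shard reports the expected segment and translog metadata fields.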
    @SuppressWarnings("unchecked")
    public void testMetadataResponseAllShards() {
        setup();

        createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 2));
        ensureGreen(INDEX_NAME);
        indexDocs();
        client().admin().indices().prepareRefresh(INDEX_NAME).get();

        RemoteStoreMetadataResponse response = client().admin().cluster().prepareRemoteStoreMetadata(INDEX_NAME, null).get();
        assertEquals(2, response.getSuccessfulShards());

        response.groupByIndexAndShards().forEach((index, shardMap) -> {
            shardMap.forEach((shardId, metadataList) -> {
                assertFalse(metadataList.isEmpty());

                for (RemoteStoreMetadata metadata : metadataList) {
                    assertEquals(index, metadata.getIndexName());
                    assertEquals((int) shardId, metadata.getShardId());

                    Map<String, Object> segments = metadata.getSegments();
                    assertNotNull(segments);
                    assertTrue(segments.containsKey("files"));
                    Map<String, Object> files = (Map<String, Object>) segments.get("files");
                    assertFalse(files.isEmpty());

                    for (Map.Entry<String, Object> entry : files.entrySet()) {
                        Map<String, Object> fileMeta = (Map<String, Object>) entry.getValue();
                        assertThat(fileMeta, allOf(hasKey("original_name"), hasKey("checksum"), hasKey("length")));
                    }

                    assertTrue(segments.containsKey("replication_checkpoint"));
                    Map<String, Object> checkpoint = (Map<String, Object>) segments.get("replication_checkpoint");
                    assertThat(
                        checkpoint,
                        allOf(
                            hasKey("primary_term"),
                            hasKey("segments_gen"),
                            hasKey("segment_infos_version"),
                            hasKey("codec"),
                            hasKey("created_timestamp")
                        )
                    );

                    Map<String, Object> translog = metadata.getTranslog();
                    assertNotNull(translog);
                    assertThat(
                        translog,
                        allOf(
                            hasKey("primary_term"),
                            hasKey("generation"),
                            hasKey("min_translog_gen"),
                            hasKey("generation_to_primary_term")
                        )
                    );
                }
            });
        });
    }

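    // Indexes a random number of simple documents to generate segment and translog data
    // for the test index before metadata is fetched.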
    private void indexDocs() {
        for (int i = 0; i < randomIntBetween(10, 20); i++) {
            client().prepareIndex(INDEX_NAME).setId("doc-" + i).setSource("field", "value-" + i).get();
        }
    }
}